
(-)sys/conf/files (+3 lines)
Lines 3478-3483
 dev/virtio/virtio_bus_if.m		optional	virtio
 dev/virtio/virtio_if.m			optional	virtio
 dev/virtio/pci/virtio_pci.c		optional	virtio_pci
+dev/virtio/pci/virtio_pci_if.m		optional	virtio_pci
+dev/virtio/pci/virtio_pci_legacy.c	optional	virtio_pci
+dev/virtio/pci/virtio_pci_modern.c	optional	virtio_pci
 dev/virtio/mmio/virtio_mmio.c		optional	virtio_mmio fdt
 dev/virtio/mmio/virtio_mmio_if.m	optional	virtio_mmio fdt
 dev/virtio/network/if_vtnet.c		optional	vtnet
(-)sys/modules/virtio/pci/Makefile (+2 lines)
Lines 27-32
 KMOD=  virtio_pci
 SRCS=  virtio_pci.c
+SRCS+= virtio_pci_legacy.c virtio_pci_modern.c
+SRCS+= virtio_pci_if.c virtio_pci_if.h
 SRCS+= virtio_bus_if.h virtio_if.h
 SRCS+= bus_if.h device_if.h pci_if.h
 
(-)sys/dev/virtio.ori/balloon/virtio_balloon.c (-49 / +15 lines)
Lines 80-86
 static struct virtio_feature_desc vtballoon_feature_desc[] = {
 	{ VIRTIO_BALLOON_F_MUST_TELL_HOST,	"MustTellHost"	},
 	{ VIRTIO_BALLOON_F_STATS_VQ,		"StatsVq"	},
-	{ VIRTIO_BALLOON_F_DEFLATE_ON_OOM,	"DeflateOnOOM"	},
 
 	{ 0, NULL }
 };
Lines 90-97
 static int	vtballoon_detach(device_t);
 static int	vtballoon_config_change(device_t);
 
-static int	vtballoon_negotiate_features(struct vtballoon_softc *);
-static int	vtballoon_setup_features(struct vtballoon_softc *);
+static void	vtballoon_negotiate_features(struct vtballoon_softc *);
 static int	vtballoon_alloc_virtqueues(struct vtballoon_softc *);
 
 static void	vtballoon_vq_intr(void *);
Lines 111-123
 
 static int	vtballoon_sleep(struct vtballoon_softc *);
 static void	vtballoon_thread(void *);
-static void	vtballoon_setup_sysctl(struct vtballoon_softc *);
+static void	vtballoon_add_sysctl(struct vtballoon_softc *);
 
-#define vtballoon_modern(_sc) \
-    (((_sc)->vtballoon_features & VIRTIO_F_VERSION_1) != 0)
-
 /* Features desired/implemented by this driver. */
-#define VTBALLOON_FEATURES		VIRTIO_BALLOON_F_MUST_TELL_HOST
+#define VTBALLOON_FEATURES		0
 
 /* Timeout between retries when the balloon needs inflating. */
 #define VTBALLOON_LOWMEM_TIMEOUT	hz
Lines 158-167
 };
 static devclass_t vtballoon_devclass;
 
-DRIVER_MODULE(virtio_balloon, vtpcil, vtballoon_driver,
+DRIVER_MODULE(virtio_balloon, virtio_pci, vtballoon_driver,
     vtballoon_devclass, 0, 0);
-DRIVER_MODULE(virtio_balloon, vtpcim, vtballoon_driver,
-    vtballoon_devclass, 0, 0);
 MODULE_VERSION(virtio_balloon, 1);
 MODULE_DEPEND(virtio_balloon, virtio, 1, 1, 1);
 
Lines 185-202
 
 	sc = device_get_softc(dev);
 	sc->vtballoon_dev = dev;
-	virtio_set_feature_desc(dev, vtballoon_feature_desc);
 
 	VTBALLOON_LOCK_INIT(sc, device_get_nameunit(dev));
 	TAILQ_INIT(&sc->vtballoon_pages);
 
-	vtballoon_setup_sysctl(sc);
+	vtballoon_add_sysctl(sc);
 
-	error = vtballoon_setup_features(sc);
-	if (error) {
-		device_printf(dev, "cannot setup features\n");
-		goto fail;
-	}
+	virtio_set_feature_desc(dev, vtballoon_feature_desc);
+	vtballoon_negotiate_features(sc);
 
 	sc->vtballoon_page_frames = malloc(VTBALLOON_PAGES_PER_REQUEST *
 	    sizeof(uint32_t), M_DEVBUF, M_NOWAIT | M_ZERO);
Lines 282-313
 	return (1);
 }
 
-static int
+static void
 vtballoon_negotiate_features(struct vtballoon_softc *sc)
 {
 	device_t dev;
 	uint64_t features;
 
 	dev = sc->vtballoon_dev;
-	features = VTBALLOON_FEATURES;
-
-	sc->vtballoon_features = virtio_negotiate_features(dev, features);
-	return (virtio_finalize_features(dev));
+	features = virtio_negotiate_features(dev, VTBALLOON_FEATURES);
+	sc->vtballoon_features = features;
 }
 
 static int
-vtballoon_setup_features(struct vtballoon_softc *sc)
-{
-	int error;
-
-	error = vtballoon_negotiate_features(sc);
-	if (error)
-		return (error);
-
-	return (0);
-}
-
-static int
 vtballoon_alloc_virtqueues(struct vtballoon_softc *sc)
 {
 	device_t dev;
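Note on the hunk above: the removed ("-") side splits feature setup into a negotiate step followed by a finalize step, which a VirtIO 1.0 transport needs in order to latch FEATURES_OK; both calls appear verbatim in the hunk. A minimal sketch of that flow, using a hypothetical vtfoo driver (the names vtfoo/VTFOO_FEATURES are illustrative, not from this patch):

static int
vtfoo_setup_features(struct vtfoo_softc *sc)
{
	device_t dev = sc->vtfoo_dev;

	/*
	 * Offer the driver's feature mask; the transport returns the
	 * subset the device accepted.
	 */
	sc->vtfoo_features = virtio_negotiate_features(dev, VTFOO_FEATURES);

	/*
	 * VirtIO 1.0 transports must then set FEATURES_OK, which the
	 * device may reject -- hence the int return on the modern side.
	 */
	return (virtio_finalize_features(dev));
}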
Lines 465-472
 {
 	vm_page_t m;
 
-	m = vm_page_alloc(NULL, 0,
-	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP);
+	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
 	if (m != NULL)
 		sc->vtballoon_current_npages++;
 
Lines 489-512
 	desired = virtio_read_dev_config_4(sc->vtballoon_dev,
 	    offsetof(struct virtio_balloon_config, num_pages));
 
-	if (vtballoon_modern(sc))
-		return (desired);
-	else
-		return (le32toh(desired));
+	return (le32toh(desired));
 }
 
 static void
 vtballoon_update_size(struct vtballoon_softc *sc)
 {
-	uint32_t npages;
 
-	npages = sc->vtballoon_current_npages;
-	if (!vtballoon_modern(sc))
-		npages = htole32(npages);
-
 	virtio_write_dev_config_4(sc->vtballoon_dev,
-	    offsetof(struct virtio_balloon_config, actual), npages);
+	    offsetof(struct virtio_balloon_config, actual),
+	    htole32(sc->vtballoon_current_npages));
 }
 
 static int
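The two config-space hunks above hinge on byte order: the removed code only swaps for legacy devices, presumably because the modern transport already hands back host-order values, while legacy config fields are little-endian on the wire. A condensed restatement of the removed read path (no new behavior, just the pattern in one place):

static uint32_t
vtballoon_desired_size(struct vtballoon_softc *sc)
{
	uint32_t desired;

	desired = virtio_read_dev_config_4(sc->vtballoon_dev,
	    offsetof(struct virtio_balloon_config, num_pages));
	/* Only legacy devices need the little-endian conversion. */
	return (vtballoon_modern(sc) ? desired : le32toh(desired));
}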
Lines 578-584
 }
 
 static void
-vtballoon_setup_sysctl(struct vtballoon_softc *sc)
+vtballoon_add_sysctl(struct vtballoon_softc *sc)
 {
 	device_t dev;
 	struct sysctl_ctx_list *ctx;
(-)sys/dev/virtio.ori/balloon/virtio_balloon.h (-27 / +1 lines)
Lines 36-42
 /* Feature bits. */
 #define VIRTIO_BALLOON_F_MUST_TELL_HOST	0x1 /* Tell before reclaiming pages */
 #define VIRTIO_BALLOON_F_STATS_VQ	0x2 /* Memory stats virtqueue */
-#define VIRTIO_BALLOON_F_DEFLATE_ON_OOM	0x4 /* Deflate balloon on OOM */
 
 /* Size of a PFN in the balloon interface. */
 #define VIRTIO_BALLOON_PFN_SHIFT 12
Lines 55-87
 #define VIRTIO_BALLOON_S_MINFLT   3   /* Number of minor faults */
 #define VIRTIO_BALLOON_S_MEMFREE  4   /* Total amount of free memory */
 #define VIRTIO_BALLOON_S_MEMTOT   5   /* Total amount of memory */
-#define VIRTIO_BALLOON_S_AVAIL    6   /* Available memory as in /proc */
-#define VIRTIO_BALLOON_S_CACHES   7   /* Disk caches */
-#define VIRTIO_BALLOON_S_NR       8
+#define VIRTIO_BALLOON_S_NR       6
 
-/*
- * Memory statistics structure.
- * Driver fills an array of these structures and passes to device.
- *
- * NOTE: fields are laid out in a way that would make compiler add padding
- * between and after fields, so we have to use compiler-specific attributes to
- * pack it, to disable this padding. This also often causes compiler to
- * generate suboptimal code.
- *
- * We maintain this statistics structure format for backwards compatibility,
- * but don't follow this example.
- *
- * If implementing a similar structure, do something like the below instead:
- *     struct virtio_balloon_stat {
- *         __virtio16 tag;
- *         __u8 reserved[6];
- *         __virtio64 val;
- *     };
- *
- * In other words, add explicit reserved fields to align field and
- * structure boundaries at field size, avoiding compiler padding
- * without the packed attribute.
- */
 struct virtio_balloon_stat {
 	uint16_t tag;
 	uint64_t val;
(-)sys/dev/virtio.ori/block/virtio_blk.c (-95 / +64 lines)
Lines 76-85
 	uint64_t		 vtblk_features;
 	uint32_t		 vtblk_flags;
 #define VTBLK_FLAG_INDIRECT	0x0001
-#define VTBLK_FLAG_DETACH	0x0002
-#define VTBLK_FLAG_SUSPEND	0x0004
-#define VTBLK_FLAG_BARRIER	0x0008
-#define VTBLK_FLAG_WCE_CONFIG	0x0010
+#define VTBLK_FLAG_READONLY	0x0002
+#define VTBLK_FLAG_DETACH	0x0004
+#define VTBLK_FLAG_SUSPEND	0x0008
+#define VTBLK_FLAG_BARRIER	0x0010
+#define VTBLK_FLAG_WC_CONFIG	0x0020
 
 	struct virtqueue	*vtblk_vq;
 	struct sglist		*vtblk_sglist;
Lines 108-117
 	{ VIRTIO_BLK_F_RO,		"ReadOnly"	},
 	{ VIRTIO_BLK_F_BLK_SIZE,	"BlockSize"	},
 	{ VIRTIO_BLK_F_SCSI,		"SCSICmds"	},
-	{ VIRTIO_BLK_F_FLUSH,		"FlushCmd"	},
+	{ VIRTIO_BLK_F_WCE,		"WriteCache"	},
 	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology"	},
 	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE"	},
-	{ VIRTIO_BLK_F_MQ,		"Multiqueue"	},
 
 	{ 0, NULL }
 };
Lines 133-140
 static int	vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
 static void	vtblk_strategy(struct bio *);
 
-static int	vtblk_negotiate_features(struct vtblk_softc *);
-static int	vtblk_setup_features(struct vtblk_softc *);
+static void	vtblk_negotiate_features(struct vtblk_softc *);
+static void	vtblk_setup_features(struct vtblk_softc *);
 static int	vtblk_maximum_segments(struct vtblk_softc *,
 		    struct virtio_blk_config *);
 static int	vtblk_alloc_virtqueue(struct vtblk_softc *);
Lines 193-206
 static void	vtblk_setup_sysctl(struct vtblk_softc *);
 static int	vtblk_tunable_int(struct vtblk_softc *, const char *, int);
 
-#define vtblk_modern(_sc) (((_sc)->vtblk_features & VIRTIO_F_VERSION_1) != 0)
-#define vtblk_htog16(_sc, _val)	virtio_htog16(vtblk_modern(_sc), _val)
-#define vtblk_htog32(_sc, _val)	virtio_htog32(vtblk_modern(_sc), _val)
-#define vtblk_htog64(_sc, _val)	virtio_htog64(vtblk_modern(_sc), _val)
-#define vtblk_gtoh16(_sc, _val)	virtio_gtoh16(vtblk_modern(_sc), _val)
-#define vtblk_gtoh32(_sc, _val)	virtio_gtoh32(vtblk_modern(_sc), _val)
-#define vtblk_gtoh64(_sc, _val)	virtio_gtoh64(vtblk_modern(_sc), _val)
-
 /* Tunables. */
 static int vtblk_no_ident = 0;
 TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
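For orientation: the removed vtblk_htog*/vtblk_gtoh* wrappers delegate to virtio_htog*/virtio_gtoh* with a "modern" flag derived from VIRTIO_F_VERSION_1. A plausible reading of those helpers, assuming the usual convention that modern devices are always little-endian while legacy devices use guest byte order (sketch only; the helper bodies are not part of this diff):

static inline uint16_t
example_virtio_htog16(bool modern, uint16_t val)
{
	/*
	 * Device-to-guest: modern fields are little-endian on the wire;
	 * legacy fields are already in guest byte order.
	 */
	return (modern ? le16toh(val) : val);
}

static inline uint16_t
example_virtio_gtoh16(bool modern, uint16_t val)
{
	/* Guest-to-device: the mirror image of the above. */
	return (modern ? htole16(val) : val);
}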
Lines 208-227
 TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);
 
 /* Features desired/implemented by this driver. */
-#define VTBLK_COMMON_FEATURES \
-    (VIRTIO_BLK_F_SIZE_MAX		| \
+#define VTBLK_FEATURES \
+    (VIRTIO_BLK_F_BARRIER		| \
+     VIRTIO_BLK_F_SIZE_MAX		| \
      VIRTIO_BLK_F_SEG_MAX		| \
      VIRTIO_BLK_F_GEOMETRY		| \
      VIRTIO_BLK_F_RO			| \
      VIRTIO_BLK_F_BLK_SIZE		| \
-     VIRTIO_BLK_F_FLUSH			| \
+     VIRTIO_BLK_F_WCE			| \
      VIRTIO_BLK_F_TOPOLOGY		| \
      VIRTIO_BLK_F_CONFIG_WCE		| \
      VIRTIO_RING_F_INDIRECT_DESC)
 
-#define VTBLK_MODERN_FEATURES 	(VTBLK_COMMON_FEATURES)
-#define VTBLK_LEGACY_FEATURES	(VIRTIO_BLK_F_BARRIER | VTBLK_COMMON_FEATURES)
-
 #define VTBLK_MTX(_sc)		&(_sc)->vtblk_mtx
 #define VTBLK_LOCK_INIT(_sc, _name) \
 				mtx_init(VTBLK_MTX((_sc)), (_name), \
Lines 266-275
 
 DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
     vtblk_modevent, 0);
-DRIVER_MODULE(virtio_blk, vtpcil, vtblk_driver, vtblk_devclass,
+DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
     vtblk_modevent, 0);
-DRIVER_MODULE(virtio_blk, vtpcim, vtblk_driver, vtblk_devclass,
-    vtblk_modevent, 0);
 MODULE_VERSION(virtio_blk, 1);
 MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);
 
Lines 313-322
 	struct virtio_blk_config blkcfg;
 	int error;
 
-	sc = device_get_softc(dev);
-	sc->vtblk_dev = dev;
 	virtio_set_feature_desc(dev, vtblk_feature_desc);
 
+	sc = device_get_softc(dev);
+	sc->vtblk_dev = dev;
 	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
 	bioq_init(&sc->vtblk_bioq);
 	TAILQ_INIT(&sc->vtblk_dump_queue);
Lines 324-336
 	TAILQ_INIT(&sc->vtblk_req_ready);
 
 	vtblk_setup_sysctl(sc);
+	vtblk_setup_features(sc);
 
-	error = vtblk_setup_features(sc);
-	if (error) {
-		device_printf(dev, "cannot setup features\n");
-		goto fail;
-	}
-
 	vtblk_read_config(sc, &blkcfg);
 
 	/*
Lines 558-563
 		return;
 	}
 
+	/*
+	 * Fail any write if RO. Unfortunately, there does not seem to
+	 * be a better way to report our readonly'ness to GEOM above.
+	 */
+	if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
+	    (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
+		vtblk_bio_done(sc, bp, EROFS);
+		return;
+	}
+
 	VTBLK_LOCK(sc);
 
 	if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
Lines 572-611
 	VTBLK_UNLOCK(sc);
 }
 
-static int
+static void
 vtblk_negotiate_features(struct vtblk_softc *sc)
 {
 	device_t dev;
 	uint64_t features;
 
 	dev = sc->vtblk_dev;
-	features = virtio_bus_is_modern(dev) ? VTBLK_MODERN_FEATURES :
-	    VTBLK_LEGACY_FEATURES;
+	features = VTBLK_FEATURES;
 
 	sc->vtblk_features = virtio_negotiate_features(dev, features);
-	return (virtio_finalize_features(dev));
 }
 
-static int
+static void
 vtblk_setup_features(struct vtblk_softc *sc)
 {
 	device_t dev;
-	int error;
 
 	dev = sc->vtblk_dev;
 
-	error = vtblk_negotiate_features(sc);
-	if (error)
-		return (error);
+	vtblk_negotiate_features(sc);
 
 	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
 		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
-	if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
-		sc->vtblk_flags |= VTBLK_FLAG_WCE_CONFIG;
+	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
+		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
 	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
-		sc->vtblk_flags |= VTBLK_FLAG_BARRIER; /* Legacy. */
-
-	return (0);
+		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
+	if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
+		sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;
 }
 
 static int
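The rewritten vtblk_negotiate_features() above also shows how the modern ("-") side chooses its feature mask per transport: BARRIER is a legacy-only bit, so it is offered only when the bus is not VirtIO 1.0. Condensed sketch of that selection (both virtio_bus_is_modern() and the feature macros appear in this diff; nothing new is invented):

static void
example_select_features(struct vtblk_softc *sc)
{
	device_t dev = sc->vtblk_dev;
	uint64_t features;

	/* Legacy transports additionally get VIRTIO_BLK_F_BARRIER. */
	features = virtio_bus_is_modern(dev) ?
	    VTBLK_MODERN_FEATURES : VTBLK_LEGACY_FEATURES;
	sc->vtblk_features = virtio_negotiate_features(dev, features);
}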
Lines 684-702
 	dp->d_name = VTBLK_DISK_NAME;
 	dp->d_unit = device_get_unit(dev);
 	dp->d_drv1 = sc;
-	dp->d_flags = DISKFLAG_UNMAPPED_BIO | DISKFLAG_DIRECT_COMPLETION;
+	dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO |
+	    DISKFLAG_DIRECT_COMPLETION;
 	dp->d_hba_vendor = virtio_get_vendor(dev);
 	dp->d_hba_device = virtio_get_device(dev);
 	dp->d_hba_subvendor = virtio_get_subvendor(dev);
 	dp->d_hba_subdevice = virtio_get_subdevice(dev);
 
-	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
-		dp->d_flags |= DISKFLAG_WRITE_PROTECT;
-	else {
-		if (virtio_with_feature(dev, VIRTIO_BLK_F_FLUSH))
-			dp->d_flags |= DISKFLAG_CANFLUSHCACHE;
+	if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
 		dp->d_dump = vtblk_dump;
-	}
 
 	/* Capacity is always in 512-byte units. */
 	dp->d_mediasize = blkcfg->capacity * 512;
Lines 880-906
 	bp = bioq_takefirst(bioq);
 	req->vbr_bp = bp;
 	req->vbr_ack = -1;
-	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
+	req->vbr_hdr.ioprio = 1;
 
 	switch (bp->bio_cmd) {
 	case BIO_FLUSH:
-		req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_FLUSH);
-		req->vbr_hdr.sector = 0;
+		req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
 		break;
 	case BIO_READ:
-		req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_IN);
-		req->vbr_hdr.sector = vtblk_gtoh64(sc, bp->bio_offset / 512);
+		req->vbr_hdr.type = VIRTIO_BLK_T_IN;
+		req->vbr_hdr.sector = bp->bio_offset / 512;
 		break;
 	case BIO_WRITE:
-		req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_OUT);
-		req->vbr_hdr.sector = vtblk_gtoh64(sc, bp->bio_offset / 512);
+		req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
+		req->vbr_hdr.sector = bp->bio_offset / 512;
 		break;
 	default:
 		panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
 	}
 
 	if (bp->bio_flags & BIO_ORDERED)
-		req->vbr_hdr.type |= vtblk_gtoh32(sc, VIRTIO_BLK_T_BARRIER);
+		req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;
 
 	return (req);
 }
Lines 931-938
 			if (!virtqueue_empty(vq))
 				return (EBUSY);
 			ordered = 1;
-			req->vbr_hdr.type &= vtblk_gtoh32(sc,
-				~VIRTIO_BLK_T_BARRIER);
+			req->vbr_hdr.type &= ~VIRTIO_BLK_T_BARRIER;
 		}
 	}
 
Lines 1036-1051
 static void
 vtblk_drain(struct vtblk_softc *sc)
 {
+	struct bio_queue queue;
 	struct bio_queue_head *bioq;
 	struct vtblk_request *req;
 	struct bio *bp;
 
 	bioq = &sc->vtblk_bioq;
+	TAILQ_INIT(&queue);
 
 	if (sc->vtblk_vq != NULL) {
-		struct bio_queue queue;
-
-		TAILQ_INIT(&queue);
 		vtblk_queue_completed(sc, &queue);
 		vtblk_done_completed(sc, &queue);
 
Lines 1136-1157
 	/* Read the configuration if the feature was negotiated. */
 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
-	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
-	    geometry.cylinders, blkcfg);
-	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
-	    geometry.heads, blkcfg);
-	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
-	    geometry.sectors, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
-	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
-	    topology.physical_block_exp, blkcfg);
-	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
-	    topology.alignment_offset, blkcfg);
-	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
-	    topology.min_io_size, blkcfg);
-	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
-	    topology.opt_io_size, blkcfg);
-	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, wce, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
+	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, writeback, blkcfg);
 }
 
 #undef VTBLK_GET_CONFIG
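The per-member reads on the "-" side suggest VTBLK_GET_CONFIG() fetches exactly one config field, gated on its feature bit; reading geometry and topology member-by-member keeps each access within a single field, which a modern transport may require. The macro body is outside this hunk, so the following is only an assumed shape, for orientation:

#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)			\
	if (virtio_with_feature(_dev, _feature)) {			\
		/* Read one gated field into the caller's blkcfg. */	\
		virtio_read_device_config(_dev,				\
		    offsetof(struct virtio_blk_config, _field),		\
		    &(_cfg)->_field, sizeof((_cfg)->_field));		\
	}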
Lines 1175-1182
 		return;
 
 	req->vbr_ack = -1;
-	req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_GET_ID);
-	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
+	req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
+	req->vbr_hdr.ioprio = 1;
 	req->vbr_hdr.sector = 0;
 
 	req->vbr_bp = &buf;
Lines 1307-1315
 
 	req = &sc->vtblk_dump_request;
 	req->vbr_ack = -1;
-	req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_OUT);
-	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
-	req->vbr_hdr.sector = vtblk_gtoh64(sc, offset / 512);
+	req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
+	req->vbr_hdr.ioprio = 1;
+	req->vbr_hdr.sector = offset / 512;
 
 	req->vbr_bp = &buf;
 	g_reset_bio(&buf);
Lines 1329-1336
 
 	req = &sc->vtblk_dump_request;
 	req->vbr_ack = -1;
-	req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_FLUSH);
-	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
+	req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
+	req->vbr_hdr.ioprio = 1;
 	req->vbr_hdr.sector = 0;
 
 	req->vbr_bp = &buf;
Lines 1358-1364
 
 	/* Set either writeback (1) or writethrough (0) mode. */
 	virtio_write_dev_config_1(sc->vtblk_dev,
-	    offsetof(struct virtio_blk_config, wce), wc);
+	    offsetof(struct virtio_blk_config, writeback), wc);
 }
 
 static int
Lines 1367-1381
 {
 	int wc;
 
-	if (sc->vtblk_flags & VTBLK_FLAG_WCE_CONFIG) {
+	if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
 		wc = vtblk_tunable_int(sc, "writecache_mode",
 		    vtblk_writecache_mode);
 		if (wc >= 0 && wc < VTBLK_CACHE_MAX)
 			vtblk_set_write_cache(sc, wc);
 		else
-			wc = blkcfg->wce;
+			wc = blkcfg->writeback;
 	} else
-		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_FLUSH);
+		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);
 
 	return (wc);
 }
Lines 1392-1398
 	error = sysctl_handle_int(oidp, &wc, 0, req);
 	if (error || req->newptr == NULL)
 		return (error);
-	if ((sc->vtblk_flags & VTBLK_FLAG_WCE_CONFIG) == 0)
+	if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
 		return (EPERM);
 	if (wc < 0 || wc >= VTBLK_CACHE_MAX)
 		return (EINVAL);
(-)sys/dev/virtio.ori/block/virtio_blk.h (-22 / +5 lines)
Lines 34-55
 #define _VIRTIO_BLK_H
 
 /* Feature bits */
+#define VIRTIO_BLK_F_BARRIER	0x0001	/* Does host support barriers? */
 #define VIRTIO_BLK_F_SIZE_MAX	0x0002	/* Indicates maximum segment size */
 #define VIRTIO_BLK_F_SEG_MAX	0x0004	/* Indicates maximum # of segments */
 #define VIRTIO_BLK_F_GEOMETRY	0x0010	/* Legacy geometry available  */
 #define VIRTIO_BLK_F_RO		0x0020	/* Disk is read-only */
 #define VIRTIO_BLK_F_BLK_SIZE	0x0040	/* Block size of disk is available*/
-#define VIRTIO_BLK_F_FLUSH	0x0200	/* Flush command supported */
+#define VIRTIO_BLK_F_SCSI	0x0080	/* Supports scsi command passthru */
+#define VIRTIO_BLK_F_WCE	0x0200	/* Writeback mode enabled after reset */
 #define VIRTIO_BLK_F_TOPOLOGY	0x0400	/* Topology information is available */
 #define VIRTIO_BLK_F_CONFIG_WCE 0x0800	/* Writeback mode available in config */
-#define VIRTIO_BLK_F_MQ 	0x1000 	/* Support more than one vq */
 
-/* Legacy feature bits */
-#define VIRTIO_BLK_F_BARRIER	0x0001	/* Does host support barriers? */
-#define VIRTIO_BLK_F_SCSI	0x0080	/* Supports scsi command passthru */
-
-/* Old (deprecated) name for VIRTIO_BLK_F_FLUSH. */
-#define VIRTIO_BLK_F_WCE VIRTIO_BLK_F_FLUSH
-
 #define VIRTIO_BLK_ID_BYTES	20	/* ID string length */
 
 struct virtio_blk_config {
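One detail worth calling out in this hunk: bit 0x0200 carries both names. The "-" side keeps VIRTIO_BLK_F_WCE as a deprecated alias of VIRTIO_BLK_F_FLUSH (the spec renamed the bit), so a feature check against either name tests the same bit. Illustration only:

#define VIRTIO_BLK_F_FLUSH	0x0200			/* current spec name */
#define VIRTIO_BLK_F_WCE	VIRTIO_BLK_F_FLUSH	/* deprecated alias */

static bool
example_has_flush(uint64_t features)
{
	/* Equivalent to testing VIRTIO_BLK_F_WCE. */
	return ((features & VIRTIO_BLK_F_FLUSH) != 0);
}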
Lines 71-93
 
 	/* Topology of the device (if VIRTIO_BLK_F_TOPOLOGY) */
 	struct virtio_blk_topology {
-		/* exponent for physical block per logical block. */
 		uint8_t physical_block_exp;
-		/* alignment offset in logical blocks. */
 		uint8_t alignment_offset;
-		/* minimum I/O size without performance penalty in logical
-		 * blocks. */
 		uint16_t min_io_size;
-		/* optimal sustained I/O size in logical blocks. */
 		uint32_t opt_io_size;
 	} topology;
 
 	/* Writeback mode (if VIRTIO_BLK_F_CONFIG_WCE) */
-	uint8_t wce;
-	uint8_t unused;
+	uint8_t writeback;
 
-	/* Number of vqs, only available when VIRTIO_BLK_F_MQ is set */
-	uint16_t num_queues;
 } __packed;
 
 /*
Lines 120-130
 /* ID string length */
 #define VIRTIO_BLK_ID_BYTES	20
 
-/*
- * This comes first in the read scatter-gather list.
- * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated,
- * this is the first element of the read scatter-gather list.
- */
+/* This is the first element of the read scatter-gather list. */
 struct virtio_blk_outhdr {
 	/* VIRTIO_BLK_T* */
 	uint32_t type;
(-)sys/dev/virtio.ori/console/virtio_console.c (-39 / +16 lines)
Lines 158-165
 static int	 vtcon_detach(device_t);
 static int	 vtcon_config_change(device_t);
 
-static int	 vtcon_setup_features(struct vtcon_softc *);
-static int	 vtcon_negotiate_features(struct vtcon_softc *);
+static void	 vtcon_setup_features(struct vtcon_softc *);
+static void	 vtcon_negotiate_features(struct vtcon_softc *);
 static int	 vtcon_alloc_scports(struct vtcon_softc *);
 static int	 vtcon_alloc_virtqueues(struct vtcon_softc *);
 static void	 vtcon_read_config(struct vtcon_softc *,
Lines 227-240
 static void	 vtcon_enable_interrupts(struct vtcon_softc *);
 static void	 vtcon_disable_interrupts(struct vtcon_softc *);
 
-#define vtcon_modern(_sc) (((_sc)->vtcon_features & VIRTIO_F_VERSION_1) != 0)
-#define vtcon_htog16(_sc, _val)	virtio_htog16(vtcon_modern(_sc), _val)
-#define vtcon_htog32(_sc, _val)	virtio_htog32(vtcon_modern(_sc), _val)
-#define vtcon_htog64(_sc, _val)	virtio_htog64(vtcon_modern(_sc), _val)
-#define vtcon_gtoh16(_sc, _val)	virtio_gtoh16(vtcon_modern(_sc), _val)
-#define vtcon_gtoh32(_sc, _val)	virtio_gtoh32(vtcon_modern(_sc), _val)
-#define vtcon_gtoh64(_sc, _val)	virtio_gtoh64(vtcon_modern(_sc), _val)
-
 static int	 vtcon_pending_free;
 
 static struct ttydevsw vtcon_tty_class = {
Lines 264-273
 };
 static devclass_t vtcon_devclass;
 
-DRIVER_MODULE(virtio_console, vtpcil, vtcon_driver, vtcon_devclass,
+DRIVER_MODULE(virtio_console, virtio_pci, vtcon_driver, vtcon_devclass,
     vtcon_modevent, 0);
-DRIVER_MODULE(virtio_console, vtpcim, vtcon_driver, vtcon_devclass,
-    vtcon_modevent, 0);
 MODULE_VERSION(virtio_console, 1);
 MODULE_DEPEND(virtio_console, virtio, 1, 1, 1);
 
Lines 333-348
 
 	sc = device_get_softc(dev);
 	sc->vtcon_dev = dev;
-	virtio_set_feature_desc(dev, vtcon_feature_desc);
 
 	mtx_init(&sc->vtcon_mtx, "vtconmtx", NULL, MTX_DEF);
 	mtx_init(&sc->vtcon_ctrl_tx_mtx, "vtconctrlmtx", NULL, MTX_DEF);
 
-	error = vtcon_setup_features(sc);
-	if (error) {
-		device_printf(dev, "cannot setup features\n");
-		goto fail;
-	}
+	virtio_set_feature_desc(dev, vtcon_feature_desc);
+	vtcon_setup_features(sc);
 
 	vtcon_read_config(sc, &concfg);
 	vtcon_determine_max_ports(sc, &concfg);
Lines 434-440
 	return (0);
 }
 
-static int
+static void
 vtcon_negotiate_features(struct vtcon_softc *sc)
 {
 	device_t dev;
Lines 444-470
 	features = VTCON_FEATURES;
 
 	sc->vtcon_features = virtio_negotiate_features(dev, features);
-	return (virtio_finalize_features(dev));
 }
 
-static int
+static void
 vtcon_setup_features(struct vtcon_softc *sc)
 {
 	device_t dev;
-	int error;
 
 	dev = sc->vtcon_dev;
 
-	error = vtcon_negotiate_features(sc);
-	if (error)
-		return (error);
+	vtcon_negotiate_features(sc);
 
 	if (virtio_with_feature(dev, VIRTIO_CONSOLE_F_SIZE))
 		sc->vtcon_flags |= VTCON_FLAG_SIZE;
 	if (virtio_with_feature(dev, VIRTIO_CONSOLE_F_MULTIPORT))
 		sc->vtcon_flags |= VTCON_FLAG_MULTIPORT;
-
-	return (0);
 }
 
 #define VTCON_GET_CONFIG(_dev, _feature, _field, _cfg)			\
Lines 867-886
     struct virtio_console_control *control, void *data, size_t data_len)
 {
 	device_t dev;
-	uint32_t id;
-	uint16_t event;
+	int id;
 
 	dev = sc->vtcon_dev;
-	id = vtcon_htog32(sc, control->id);
-	event = vtcon_htog16(sc, control->event);
+	id = control->id;
 
-	if (id >= sc->vtcon_max_ports) {
-		device_printf(dev, "%s: event %d invalid port ID %d\n",
-		    __func__, event, id);
+	if (id < 0 || id >= sc->vtcon_max_ports) {
+		device_printf(dev, "%s: invalid port ID %d\n", __func__, id);
 		return;
 	}
 
-	switch (event) {
+	switch (control->event) {
 	case VIRTIO_CONSOLE_PORT_ADD:
 		vtcon_ctrl_port_add_event(sc, id);
 		break;
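Condensed restatement of the validation change above: the "-" side converts the wire fields once, up front, through the htog helpers, and by making id unsigned it only needs the upper-bound check. Sketch only (reuses names from the hunk):

static bool
example_ctrl_event_valid(struct vtcon_softc *sc,
    struct virtio_console_control *control)
{
	uint32_t id = vtcon_htog32(sc, control->id);

	/* Unsigned id: a negative wire value wraps above max_ports. */
	return (id < sc->vtcon_max_ports);
}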
Lines 1008-1016
 	if ((sc->vtcon_flags & VTCON_FLAG_MULTIPORT) == 0)
 		return;
 
-	control.id = vtcon_gtoh32(sc, portid);
-	control.event = vtcon_gtoh16(sc, event);
-	control.value = vtcon_gtoh16(sc, value);
+	control.id = portid;
+	control.event = event;
+	control.value = value;
 
 	vtcon_ctrl_poll(sc, &control);
 }
(-)sys/dev/virtio.ori/mmio/virtio_mmio.c (-4 lines)
Lines 426-435
 	case VIRTIO_IVAR_VENDOR:
 		*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_VENDOR_ID);
 		break;
-	case VIRTIO_IVAR_SUBVENDOR:
-	case VIRTIO_IVAR_MODERN:
-		*result = 0;
-		break;
 	default:
 		return (ENOENT);
 	}
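The removed VIRTIO_IVAR_MODERN case above implies that child drivers query "modernness" through a newbus ivar, with this virtio_mmio transport (legacy-only in the modern patch) reporting 0. A sketch of the presumed consumer side; BUS_READ_IVAR() is the standard newbus accessor, and a helper of this shape is what virtio_bus_is_modern(), seen elsewhere in this diff, would look like:

static inline bool
example_bus_is_modern(device_t dev)
{
	uintptr_t modern = 0;

	/* Ask the parent virtio bus for the ivar; default to legacy. */
	if (BUS_READ_IVAR(device_get_parent(dev), dev,
	    VIRTIO_IVAR_MODERN, &modern) != 0)
		return (false);
	return (modern != 0);
}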
(-)sys/dev/virtio.ori/network/if_vtnet.c (-1181 / +846 lines)
Lines 69-75
 #include <netinet6/ip6_var.h>
 #include <netinet/udp.h>
 #include <netinet/tcp.h>
-#include <netinet/tcp_lro.h>
 #include <netinet/netdump/netdump.h>
 
 #include <machine/bus.h>
Lines 86-95
 #include "opt_inet.h"
 #include "opt_inet6.h"
 
-#if defined(INET) || defined(INET6)
-#include <machine/in_cksum.h>
-#endif
-
 static int	vtnet_modevent(module_t, int, void *);
 
 static int	vtnet_probe(device_t);
Lines 101-108
 static int	vtnet_attach_completed(device_t);
 static int	vtnet_config_change(device_t);
 
-static int	vtnet_negotiate_features(struct vtnet_softc *);
-static int	vtnet_setup_features(struct vtnet_softc *);
+static void	vtnet_negotiate_features(struct vtnet_softc *);
+static void	vtnet_setup_features(struct vtnet_softc *);
 static int	vtnet_init_rxq(struct vtnet_softc *, int);
 static int	vtnet_init_txq(struct vtnet_softc *, int);
 static int	vtnet_alloc_rxtx_queues(struct vtnet_softc *);
Lines 110-121
 static int	vtnet_alloc_rx_filters(struct vtnet_softc *);
 static void	vtnet_free_rx_filters(struct vtnet_softc *);
 static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
-static int	vtnet_alloc_interface(struct vtnet_softc *);
 static int	vtnet_setup_interface(struct vtnet_softc *);
-static int	vtnet_ioctl_mtu(struct vtnet_softc *, int);
-static int	vtnet_ioctl_ifflags(struct vtnet_softc *);
-static int	vtnet_ioctl_multi(struct vtnet_softc *);
-static int	vtnet_ioctl_ifcap(struct vtnet_softc *, struct ifreq *);
+static int	vtnet_change_mtu(struct vtnet_softc *, int);
 static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t);
 static uint64_t	vtnet_get_counter(struct ifnet *, ift_counter);
 
Lines 123-137
 static void	vtnet_rxq_free_mbufs(struct vtnet_rxq *);
 static struct mbuf *
 		vtnet_rx_alloc_buf(struct vtnet_softc *, int , struct mbuf **);
-static int	vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *,
+static int	vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *,
 		    struct mbuf *, int);
 static int	vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
 static int	vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
 static int	vtnet_rxq_new_buf(struct vtnet_rxq *);
-static int	vtnet_rxq_csum_needs_csum(struct vtnet_rxq *, struct mbuf *,
-		     uint16_t, int, struct virtio_net_hdr *);
-static int	vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
-		     uint16_t, int, struct virtio_net_hdr *);
 static int	vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
 		     struct virtio_net_hdr *);
 static void	vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
Lines 143-149
 static void	vtnet_rx_vq_intr(void *);
 static void	vtnet_rxq_tq_intr(void *, int);
 
-static int	vtnet_txq_intr_threshold(struct vtnet_txq *);
 static int	vtnet_txq_below_threshold(struct vtnet_txq *);
 static int	vtnet_txq_notify(struct vtnet_txq *);
 static void	vtnet_txq_free_mbufs(struct vtnet_txq *);
Lines 193-199
 static int	vtnet_init_tx_queues(struct vtnet_softc *);
 static int	vtnet_init_rxtx_queues(struct vtnet_softc *);
 static void	vtnet_set_active_vq_pairs(struct vtnet_softc *);
-static void	vtnet_update_rx_offloads(struct vtnet_softc *);
 static int	vtnet_reinit(struct vtnet_softc *);
 static void	vtnet_init_locked(struct vtnet_softc *);
 static void	vtnet_init(void *);
Lines 202-212
 static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
 		    struct sglist *, int, int);
 static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
-static int	vtnet_ctrl_guest_offloads(struct vtnet_softc *, uint64_t);
 static int	vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
-static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, uint8_t, int);
+static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
 static int	vtnet_set_promisc(struct vtnet_softc *, int);
 static int	vtnet_set_allmulti(struct vtnet_softc *, int);
+static void	vtnet_attach_disable_promisc(struct vtnet_softc *);
 static void	vtnet_rx_filter(struct vtnet_softc *);
 static void	vtnet_rx_filter_mac(struct vtnet_softc *);
 static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
Lines 215-237
 static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
 static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);
 
-static void	vtnet_update_speed_duplex(struct vtnet_softc *);
 static int	vtnet_is_link_up(struct vtnet_softc *);
 static void	vtnet_update_link_status(struct vtnet_softc *);
 static int	vtnet_ifmedia_upd(struct ifnet *);
 static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
-static void	vtnet_get_macaddr(struct vtnet_softc *);
-static void	vtnet_set_macaddr(struct vtnet_softc *);
-static void	vtnet_attached_set_macaddr(struct vtnet_softc *);
+static void	vtnet_get_hwaddr(struct vtnet_softc *);
+static void	vtnet_set_hwaddr(struct vtnet_softc *);
 static void	vtnet_vlan_tag_remove(struct mbuf *);
 static void	vtnet_set_rx_process_limit(struct vtnet_softc *);
+static void	vtnet_set_tx_intr_threshold(struct vtnet_softc *);
 
 static void	vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
 		    struct sysctl_oid_list *, struct vtnet_rxq *);
 static void	vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
 		    struct sysctl_oid_list *, struct vtnet_txq *);
 static void	vtnet_setup_queue_sysctl(struct vtnet_softc *);
-static void	vtnet_load_tunables(struct vtnet_softc *);
 static void	vtnet_setup_sysctl(struct vtnet_softc *);
 
 static int	vtnet_rxq_enable_intr(struct vtnet_rxq *);
Lines 249-332
 
 NETDUMP_DEFINE(vtnet);
 
-#define vtnet_htog16(_sc, _val)	virtio_htog16(vtnet_modern(_sc), _val)
-#define vtnet_htog32(_sc, _val)	virtio_htog32(vtnet_modern(_sc), _val)
-#define vtnet_htog64(_sc, _val)	virtio_htog64(vtnet_modern(_sc), _val)
-#define vtnet_gtoh16(_sc, _val)	virtio_gtoh16(vtnet_modern(_sc), _val)
-#define vtnet_gtoh32(_sc, _val)	virtio_gtoh32(vtnet_modern(_sc), _val)
-#define vtnet_gtoh64(_sc, _val)	virtio_gtoh64(vtnet_modern(_sc), _val)
-
-static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VirtIO Net driver");
-
+/* Tunables. */
+static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VNET driver parameters");
 static int vtnet_csum_disable = 0;
+TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
 SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
     &vtnet_csum_disable, 0, "Disables receive and send checksum offload");
-
-static int vtnet_fixup_needs_csum = 0;
-SYSCTL_INT(_hw_vtnet, OID_AUTO, fixup_needs_csum, CTLFLAG_RDTUN,
-    &vtnet_fixup_needs_csum, 0,
-    "Calculate valid checksum for NEEDS_CSUM packets");
-
 static int vtnet_tso_disable = 0;
-SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN,
-    &vtnet_tso_disable, 0, "Disables TSO");
-
+TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
+SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN, &vtnet_tso_disable,
+    0, "Disables TCP Segmentation Offload");
 static int vtnet_lro_disable = 0;
-SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN,
-    &vtnet_lro_disable, 0, "Disables hardware LRO");
-
+TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
+SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN, &vtnet_lro_disable,
+    0, "Disables TCP Large Receive Offload");
 static int vtnet_mq_disable = 0;
-SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN,
-    &vtnet_mq_disable, 0, "Disables multiqueue support");
-
+TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable);
+SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN, &vtnet_mq_disable,
+    0, "Disables Multi Queue support");
 static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
+TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs);
 SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
-    &vtnet_mq_max_pairs, 0, "Maximum number of multiqueue pairs");
-
-static int vtnet_tso_maxlen = IP_MAXPACKET;
-SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
-    &vtnet_tso_maxlen, 0, "TSO burst limit");
-
-static int vtnet_rx_process_limit = 1024;
+    &vtnet_mq_max_pairs, 0, "Sets the maximum number of Multi Queue pairs");
+static int vtnet_rx_process_limit = 512;
+TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit);
 SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
     &vtnet_rx_process_limit, 0,
-    "Number of RX segments processed in one pass");
-
-static int vtnet_lro_entry_count = 128;
-SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
-    &vtnet_lro_entry_count, 0, "Software LRO entry count");
-
-/* Enable sorted LRO, and the depth of the mbuf queue. */
-static int vtnet_lro_mbufq_depth = 0;
-SYSCTL_UINT(_hw_vtnet, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN,
-    &vtnet_lro_mbufq_depth, 0, "Depth of software LRO mbuf queue");
+    "Limits the number RX segments processed in a single pass");
 
 static uma_zone_t vtnet_tx_header_zone;
 
 static struct virtio_feature_desc vtnet_feature_desc[] = {
-	{ VIRTIO_NET_F_CSUM,			"TxChecksum"		},
-	{ VIRTIO_NET_F_GUEST_CSUM,		"RxChecksum"		},
-	{ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,	"CtrlRxOffloads"	},
-	{ VIRTIO_NET_F_MAC,			"MAC"			},
-	{ VIRTIO_NET_F_GSO,			"TxGSO"			},
-	{ VIRTIO_NET_F_GUEST_TSO4,		"RxLROv4"		},
-	{ VIRTIO_NET_F_GUEST_TSO6,		"RxLROv6"		},
-	{ VIRTIO_NET_F_GUEST_ECN,		"RxLROECN"		},
-	{ VIRTIO_NET_F_GUEST_UFO,		"RxUFO"			},
-	{ VIRTIO_NET_F_HOST_TSO4,		"TxTSOv4"		},
-	{ VIRTIO_NET_F_HOST_TSO6,		"TxTSOv6"		},
-	{ VIRTIO_NET_F_HOST_ECN,		"TxTSOECN"		},
-	{ VIRTIO_NET_F_HOST_UFO,		"TxUFO"			},
-	{ VIRTIO_NET_F_MRG_RXBUF,		"MrgRxBuf"		},
-	{ VIRTIO_NET_F_STATUS,			"Status"		},
-	{ VIRTIO_NET_F_CTRL_VQ,			"CtrlVq"		},
-	{ VIRTIO_NET_F_CTRL_RX,			"CtrlRxMode"		},
-	{ VIRTIO_NET_F_CTRL_VLAN,		"CtrlVLANFilter"	},
-	{ VIRTIO_NET_F_CTRL_RX_EXTRA,		"CtrlRxModeExtra"	},
-	{ VIRTIO_NET_F_GUEST_ANNOUNCE,		"GuestAnnounce"		},
-	{ VIRTIO_NET_F_MQ,			"Multiqueue"		},
-	{ VIRTIO_NET_F_CTRL_MAC_ADDR,		"CtrlMacAddr"		},
-	{ VIRTIO_NET_F_SPEED_DUPLEX,		"SpeedDuplex"		},
+	{ VIRTIO_NET_F_CSUM,		"TxChecksum"	},
+	{ VIRTIO_NET_F_GUEST_CSUM,	"RxChecksum"	},
+	{ VIRTIO_NET_F_MAC,		"MacAddress"	},
+	{ VIRTIO_NET_F_GSO,		"TxAllGSO"	},
+	{ VIRTIO_NET_F_GUEST_TSO4,	"RxTSOv4"	},
+	{ VIRTIO_NET_F_GUEST_TSO6,	"RxTSOv6"	},
+	{ VIRTIO_NET_F_GUEST_ECN,	"RxECN"		},
+	{ VIRTIO_NET_F_GUEST_UFO,	"RxUFO"		},
+	{ VIRTIO_NET_F_HOST_TSO4,	"TxTSOv4"	},
+	{ VIRTIO_NET_F_HOST_TSO6,	"TxTSOv6"	},
+	{ VIRTIO_NET_F_HOST_ECN,	"TxTSOECN"	},
+	{ VIRTIO_NET_F_HOST_UFO,	"TxUFO"		},
+	{ VIRTIO_NET_F_MRG_RXBUF,	"MrgRxBuf"	},
+	{ VIRTIO_NET_F_STATUS,		"Status"	},
+	{ VIRTIO_NET_F_CTRL_VQ,		"ControlVq"	},
+	{ VIRTIO_NET_F_CTRL_RX,		"RxMode"	},
+	{ VIRTIO_NET_F_CTRL_VLAN,	"VLanFilter"	},
+	{ VIRTIO_NET_F_CTRL_RX_EXTRA,	"RxModeExtra"	},
+	{ VIRTIO_NET_F_GUEST_ANNOUNCE,	"GuestAnnounce"	},
+	{ VIRTIO_NET_F_MQ,		"Multiqueue"	},
+	{ VIRTIO_NET_F_CTRL_MAC_ADDR,	"SetMacAddress"	},
 
 	{ 0, NULL }
 };
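Aside on the tunables churn above: with CTLFLAG_RDTUN, SYSCTL_INT() itself fetches the like-named loader tunable at boot, so the "-" side can drop the separate TUNABLE_INT() lines without losing the hw.vtnet.* knobs. Minimal example with a hypothetical OID:

static int example_limit = 512;
SYSCTL_INT(_hw_vtnet, OID_AUTO, example_limit, CTLFLAG_RDTUN,
    &example_limit, 0,
    "Read-only sysctl, seeded from the hw.vtnet.example_limit tunable");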
Lines 349-372
 
 #ifdef DEV_NETMAP
 #include <dev/netmap/if_vtnet_netmap.h>
-#endif
+#endif /* DEV_NETMAP */
 
 static driver_t vtnet_driver = {
-	.name = "vtnet",
-	.methods = vtnet_methods,
-	.size = sizeof(struct vtnet_softc)
+	"vtnet",
+	vtnet_methods,
+	sizeof(struct vtnet_softc)
 };
 static devclass_t vtnet_devclass;
 
 DRIVER_MODULE(vtnet, virtio_mmio, vtnet_driver, vtnet_devclass,
     vtnet_modevent, 0);
-DRIVER_MODULE(vtnet, vtpcil, vtnet_driver, vtnet_devclass, vtnet_modevent, 0);
-DRIVER_MODULE(vtnet, vtpcim, vtnet_driver, vtnet_devclass, vtnet_modevent, 0);
+DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
+    vtnet_modevent, 0);
 MODULE_VERSION(vtnet, 1);
 MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
 #ifdef DEV_NETMAP
 MODULE_DEPEND(vtnet, netmap, 1, 1, 1);
-#endif
+#endif /* DEV_NETMAP */
 
 static int
 vtnet_modevent(module_t mod, int type, void *unused)
Lines 408-414
 	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
 		return (ENXIO);
 
-	device_set_desc(dev, "VirtIO Network Adapter");
+	device_set_desc(dev, "VirtIO Networking Adapter");
 
 	return (BUS_PROBE_DEFAULT);
 }
Lines 421-446
 
 	sc = device_get_softc(dev);
 	sc->vtnet_dev = dev;
+
+	/* Register our feature descriptions. */
 	virtio_set_feature_desc(dev, vtnet_feature_desc);
 
 	VTNET_CORE_LOCK_INIT(sc);
 	callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
-	vtnet_load_tunables(sc);
-
-	error = vtnet_alloc_interface(sc);
-	if (error) {
-		device_printf(dev, "cannot allocate interface\n");
-		goto fail;
-	}
 
 	vtnet_setup_sysctl(sc);
+	vtnet_setup_features(sc);
 
-	error = vtnet_setup_features(sc);
-	if (error) {
-		device_printf(dev, "cannot setup features\n");
-		goto fail;
-	}
-
 	error = vtnet_alloc_rx_filters(sc);
 	if (error) {
 		device_printf(dev, "cannot allocate Rx filters\n");
Lines 467-480 Link Here
467
414
468
	error = virtio_setup_intr(dev, INTR_TYPE_NET);
415
	error = virtio_setup_intr(dev, INTR_TYPE_NET);
469
	if (error) {
416
	if (error) {
470
		device_printf(dev, "cannot setup interrupts\n");
417
		device_printf(dev, "cannot setup virtqueue interrupts\n");
418
		/* BMV: This will crash if this happens during boot! */
471
		ether_ifdetach(sc->vtnet_ifp);
419
		ether_ifdetach(sc->vtnet_ifp);
472
		goto fail;
420
		goto fail;
473
	}
421
	}
474
422
475
#ifdef DEV_NETMAP
423
#ifdef DEV_NETMAP
476
	vtnet_netmap_attach(sc);
424
	vtnet_netmap_attach(sc);
477
#endif
425
#endif /* DEV_NETMAP */
426
478
	vtnet_start_taskqueues(sc);
427
	vtnet_start_taskqueues(sc);
479
428
480
fail:
429
fail:
Lines 506-512 Link Here
506
455
507
#ifdef DEV_NETMAP
456
#ifdef DEV_NETMAP
508
	netmap_detach(ifp);
457
	netmap_detach(ifp);
509
#endif
458
#endif /* DEV_NETMAP */
510
459
511
	vtnet_free_taskqueues(sc);
460
	vtnet_free_taskqueues(sc);
512
461
Lines 573-578 Link Here
573
static int
522
static int
574
vtnet_shutdown(device_t dev)
523
vtnet_shutdown(device_t dev)
575
{
524
{
525
576
	/*
526
	/*
577
	 * Suspend already does all of what we need to
527
	 * Suspend already does all of what we need to
578
	 * do here; we just never expect to be resumed.
528
	 * do here; we just never expect to be resumed.
Lines 583-596 Link Here
583
static int
533
static int
584
vtnet_attach_completed(device_t dev)
534
vtnet_attach_completed(device_t dev)
585
{
535
{
586
	struct vtnet_softc *sc;
587
536
588
	sc = device_get_softc(dev);
537
	vtnet_attach_disable_promisc(device_get_softc(dev));
589
538
590
	VTNET_CORE_LOCK(sc);
591
	vtnet_attached_set_macaddr(sc);
592
	VTNET_CORE_UNLOCK(sc);
593
594
	return (0);
539
	return (0);
595
}
540
}
596
541
Lines 610-676 Link Here
610
	return (0);
555
	return (0);
611
}
556
}
612
557
613
static int
558
static void
614
vtnet_negotiate_features(struct vtnet_softc *sc)
559
vtnet_negotiate_features(struct vtnet_softc *sc)
615
{
560
{
616
	device_t dev;
561
	device_t dev;
617
	uint64_t features, negotiated_features;
562
	uint64_t mask, features;
618
	int no_csum;
619
563
620
	dev = sc->vtnet_dev;
564
	dev = sc->vtnet_dev;
621
	features = virtio_bus_is_modern(dev) ? VTNET_MODERN_FEATURES :
565
	mask = 0;
622
	    VTNET_LEGACY_FEATURES;
623
566
624
	/*
567
	/*
625
	 * TSO and LRO are only available when their corresponding checksum
568
	 * TSO and LRO are only available when their corresponding checksum
626
	 * offload feature is also negotiated.
569
	 * offload feature is also negotiated.
627
	 */
570
	 */
628
	no_csum = vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable);
571
	if (vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable)) {
629
	if (no_csum)
572
		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
630
		features &= ~(VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM);
573
		mask |= VTNET_TSO_FEATURES | VTNET_LRO_FEATURES;
631
	if (no_csum || vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
574
	}
632
		features &= ~VTNET_TSO_FEATURES;
575
	if (vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
633
	if (no_csum || vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
576
		mask |= VTNET_TSO_FEATURES;
634
		features &= ~VTNET_LRO_FEATURES;
577
	if (vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
635
578
		mask |= VTNET_LRO_FEATURES;
636
#ifndef VTNET_LEGACY_TX
579
#ifndef VTNET_LEGACY_TX
637
	if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
580
	if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
638
		features &= ~VIRTIO_NET_F_MQ;
581
		mask |= VIRTIO_NET_F_MQ;
639
#else
582
#else
640
	features &= ~VIRTIO_NET_F_MQ;
583
	mask |= VIRTIO_NET_F_MQ;
641
#endif
584
#endif
642
585
643
	negotiated_features = virtio_negotiate_features(dev, features);
586
	features = VTNET_FEATURES & ~mask;
587
	sc->vtnet_features = virtio_negotiate_features(dev, features);
644
588
645
	if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
646
		uint16_t mtu;
647
648
		mtu = virtio_read_dev_config_2(dev,
649
		    offsetof(struct virtio_net_config, mtu));
650
		if (mtu < VTNET_MIN_MTU /* || mtu > VTNET_MAX_MTU */) {
651
			device_printf(dev, "Invalid MTU value: %d. "
652
			    "MTU feature disabled.\n", mtu);
653
			features &= ~VIRTIO_NET_F_MTU;
654
			negotiated_features =
655
			    virtio_negotiate_features(dev, features);
656
		}
657
	}
658
659
	if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
660
		uint16_t npairs;
661
662
		npairs = virtio_read_dev_config_2(dev,
663
		    offsetof(struct virtio_net_config, max_virtqueue_pairs));
664
		if (npairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
665
		    npairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
666
			device_printf(dev, "Invalid max_virtqueue_pairs value: "
667
			    "%d. Multiqueue feature disabled.\n", npairs);
668
			features &= ~VIRTIO_NET_F_MQ;
669
			negotiated_features =
670
			    virtio_negotiate_features(dev, features);
671
		}
672
	}
673
674
	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
589
	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
675
	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
590
	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
676
		/*
591
		/*
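
The new-side negotiation above follows a validate-and-renegotiate pattern: accept features once, read the config fields (mtu, max_virtqueue_pairs) that the accepted features unlock, and if a value is nonsensical, clear the bit and negotiate again. A self-contained sketch of that pattern under stub accessors; every demo_* name here is hypothetical, not the VirtIO bus API.

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_F_MTU      (1ULL << 3)     /* illustrative bit position */
    #define DEMO_MIN_MTU    68

    /* Stubs standing in for the device; a real bus would do I/O here. */
    static uint64_t demo_negotiate(uint64_t wanted) { return (wanted); }
    static uint16_t demo_read_mtu(void) { return (9); } /* bogus value */

    static uint64_t
    negotiate_and_validate(uint64_t wanted)
    {
        uint64_t negotiated = demo_negotiate(wanted);

        /* The config field exists only once the feature was accepted. */
        if ((negotiated & DEMO_F_MTU) && demo_read_mtu() < DEMO_MIN_MTU) {
            printf("Invalid MTU value; MTU feature disabled.\n");
            wanted &= ~DEMO_F_MTU;
            negotiated = demo_negotiate(wanted);
        }
        return (negotiated);
    }

    int
    main(void)
    {
        return (negotiate_and_validate(DEMO_F_MTU) != 0);
    }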
Lines 684-718 Link Here
684
		 */
599
		 */
685
		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
600
		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
686
			device_printf(dev,
601
			device_printf(dev,
687
			    "Host LRO disabled since both mergeable buffers "
602
			    "LRO disabled due to both mergeable buffers and "
688
			    "and indirect descriptors were not negotiated\n");
603
			    "indirect descriptors not negotiated\n");
604
689
			features &= ~VTNET_LRO_FEATURES;
605
			features &= ~VTNET_LRO_FEATURES;
690
			negotiated_features =
606
			sc->vtnet_features =
691
			    virtio_negotiate_features(dev, features);
607
			    virtio_negotiate_features(dev, features);
692
		} else
608
		} else
693
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
609
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
694
	}
610
	}
695
696
	sc->vtnet_features = negotiated_features;
697
	sc->vtnet_negotiated_features = negotiated_features;
698
699
	return (virtio_finalize_features(dev));
700
}
611
}
701
612
702
static int
613
static void
703
vtnet_setup_features(struct vtnet_softc *sc)
614
vtnet_setup_features(struct vtnet_softc *sc)
704
{
615
{
705
	device_t dev;
616
	device_t dev;
706
	int error;
707
617
708
	dev = sc->vtnet_dev;
618
	dev = sc->vtnet_dev;
709
619
710
	error = vtnet_negotiate_features(sc);
620
	vtnet_negotiate_features(sc);
711
	if (error)
712
		return (error);
713
621
714
	if (virtio_with_feature(dev, VIRTIO_F_VERSION_1))
715
		sc->vtnet_flags |= VTNET_FLAG_MODERN;
716
	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
622
	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
717
		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
623
		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
718
	if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
624
	if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
Lines 723-768 Link Here
723
		sc->vtnet_flags |= VTNET_FLAG_MAC;
629
		sc->vtnet_flags |= VTNET_FLAG_MAC;
724
	}
630
	}
725
631
726
	if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
727
		sc->vtnet_max_mtu = virtio_read_dev_config_2(dev,
728
		    offsetof(struct virtio_net_config, mtu));
729
	} else
730
		sc->vtnet_max_mtu = VTNET_MAX_MTU;
731
732
	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
632
	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
733
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
633
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
734
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
634
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
735
	} else if (vtnet_modern(sc)) {
736
		/* This is identical to the mergeable header. */
737
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_v1);
738
	} else
635
	} else
739
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
636
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
740
637
741
	if (vtnet_modern(sc) || sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
638
	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
742
		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_INLINE;
639
		sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;
743
	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
640
	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
744
		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_LRO_NOMRG;
641
		sc->vtnet_rx_nsegs = VTNET_MAX_RX_SEGS;
745
	else
642
	else
746
		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_SEPARATE;
643
		sc->vtnet_rx_nsegs = VTNET_MIN_RX_SEGS;
747
644
748
	/*
749
	 * Favor "hardware" LRO if negotiated, but support software LRO as
750
	 * a fallback; enabling both at once usually gains little and can hurt.
751
	 */
752
	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) == 0 &&
753
	    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6) == 0)
754
		sc->vtnet_flags |= VTNET_FLAG_SW_LRO;
755
756
	if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
645
	if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
757
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
646
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
758
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
647
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
759
		sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MAX;
648
		sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
760
	else
649
	else
761
		sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MIN;
650
		sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;
762
651
763
	sc->vtnet_req_vq_pairs = 1;
764
	sc->vtnet_max_vq_pairs = 1;
765
766
	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
652
	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
767
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
653
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
768
654
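
Summarizing the layout choices in this hunk: a modern device or mergeable buffers lets the 12-byte header share the first descriptor segment with the data; only the legacy non-mergeable case needs a separate header segment, and LRO_NOMRG needs a long chain. A compilable sketch, with struct sizes and segment counts illustrative rather than the driver's constants:

    #include <stddef.h>
    #include <stdio.h>

    struct demo_hdr_legacy { char pad[10]; };   /* no num_buffers field */
    struct demo_hdr_v1     { char pad[12]; };   /* same size as mrg_rxbuf */

    static void
    pick_rx_layout(int modern, int mrg_rxbuf, int lro_nomrg,
        size_t *hdrsz, int *nsegs)
    {
        *hdrsz = (modern || mrg_rxbuf) ?
            sizeof(struct demo_hdr_v1) : sizeof(struct demo_hdr_legacy);
        if (modern || mrg_rxbuf)
            *nsegs = 1;     /* header inlined with the data */
        else if (lro_nomrg)
            *nsegs = 33;    /* large chain; count illustrative */
        else
            *nsegs = 2;     /* header in its own segment */
    }

    int
    main(void)
    {
        size_t hdrsz;
        int nsegs;

        pick_rx_layout(1, 0, 0, &hdrsz, &nsegs);
        printf("hdr %zu bytes, %d rx segment(s)\n", hdrsz, nsegs);
        return (0);
    }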
Lines 772-808 Link Here
772
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
658
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
773
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
659
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
774
			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
660
			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
775
776
		if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
777
			sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
778
			    offsetof(struct virtio_net_config,
779
			    max_virtqueue_pairs));
780
		}
781
	}
661
	}
782
662
783
	if (sc->vtnet_max_vq_pairs > 1) {
663
	if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) &&
784
		int req;
664
	    sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
665
		sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
666
		    offsetof(struct virtio_net_config, max_virtqueue_pairs));
667
	} else
668
		sc->vtnet_max_vq_pairs = 1;
785
669
670
	if (sc->vtnet_max_vq_pairs > 1) {
786
		/*
671
		/*
787
		 * Limit the maximum number of requested queue pairs to the
672
		 * Limit the maximum number of queue pairs to the lower of
788
		 * number of CPUs and the configured maximum.
673
		 * the number of CPUs and the configured maximum.
674
		 * The actual number of queues that get used may be less.
789
		 */
675
		 */
790
		req = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
676
		int max;
791
		if (req < 0)
677
792
			req = 1;
678
		max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
793
		if (req == 0)
679
		if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN) {
794
			req = mp_ncpus;
680
			if (max > mp_ncpus)
795
		if (req > sc->vtnet_max_vq_pairs)
681
				max = mp_ncpus;
796
			req = sc->vtnet_max_vq_pairs;
682
			if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
797
		if (req > mp_ncpus)
683
				max = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX;
798
			req = mp_ncpus;
684
			if (max > 1) {
799
		if (req > 1) {
685
				sc->vtnet_requested_vq_pairs = max;
800
			sc->vtnet_req_vq_pairs = req;
686
				sc->vtnet_flags |= VTNET_FLAG_MULTIQ;
801
			sc->vtnet_flags |= VTNET_FLAG_MQ;
687
			}
802
		}
688
		}
803
	}
689
	}
804
805
	return (0);
806
}
690
}
807
691
808
static int
692
static int
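
The queue-pair tunable handling above reduces to a small clamp. Here it is in isolation, with the new-side semantics: a negative tunable means one pair, zero means one pair per CPU, and the device and CPU limits always win.

    #include <stdio.h>

    static int
    clamp_vq_pairs(int req, int max_pairs, int ncpus)
    {
        if (req < 0)
            req = 1;            /* disabled by tunable */
        if (req == 0)
            req = ncpus;        /* 0 = one pair per CPU */
        if (req > max_pairs)
            req = max_pairs;    /* device-advertised maximum */
        if (req > ncpus)
            req = ncpus;        /* no point exceeding CPUs */
        return (req);
    }

    int
    main(void)
    {
        printf("%d\n", clamp_vq_pairs(0, 8, 4));    /* prints 4 */
        return (0);
    }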
Lines 823-836 Link Here
823
	if (rxq->vtnrx_sg == NULL)
707
	if (rxq->vtnrx_sg == NULL)
824
		return (ENOMEM);
708
		return (ENOMEM);
825
709
826
#if defined(INET) || defined(INET6)
827
	if (vtnet_software_lro(sc)) {
828
		if (tcp_lro_init_args(&rxq->vtnrx_lro, sc->vtnet_ifp,
829
		    sc->vtnet_lro_entry_count, sc->vtnet_lro_mbufq_depth) != 0)
830
			return (ENOMEM);
831
	}
832
#endif
833
834
	TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
710
	TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
835
	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
711
	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
836
	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);
712
	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);
Lines 896-902 Link Here
896
			return (error);
772
			return (error);
897
	}
773
	}
898
774
899
	vtnet_set_rx_process_limit(sc);
900
	vtnet_setup_queue_sysctl(sc);
775
	vtnet_setup_queue_sysctl(sc);
901
776
902
	return (0);
777
	return (0);
Lines 909-918 Link Here
909
	rxq->vtnrx_sc = NULL;
784
	rxq->vtnrx_sc = NULL;
910
	rxq->vtnrx_id = -1;
785
	rxq->vtnrx_id = -1;
911
786
912
#if defined(INET) || defined(INET6)
913
	tcp_lro_free(&rxq->vtnrx_lro);
914
#endif
915
916
	if (rxq->vtnrx_sg != NULL) {
787
	if (rxq->vtnrx_sg != NULL) {
917
		sglist_free(rxq->vtnrx_sg);
788
		sglist_free(rxq->vtnrx_sg);
918
		rxq->vtnrx_sg = NULL;
789
		rxq->vtnrx_sg = NULL;
Lines 1021-1059 Link Here
1021
	if (info == NULL)
892
	if (info == NULL)
1022
		return (ENOMEM);
893
		return (ENOMEM);
1023
894
1024
	for (i = 0, idx = 0; i < sc->vtnet_req_vq_pairs; i++, idx += 2) {
895
	for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx+=2) {
1025
		rxq = &sc->vtnet_rxqs[i];
896
		rxq = &sc->vtnet_rxqs[i];
1026
		VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
897
		VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
1027
		    vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
898
		    vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
1028
		    "%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);
899
		    "%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id);
1029
900
1030
		txq = &sc->vtnet_txqs[i];
901
		txq = &sc->vtnet_txqs[i];
1031
		VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
902
		VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
1032
		    vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
903
		    vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
1033
		    "%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
904
		    "%s-%d tx", device_get_nameunit(dev), txq->vtntx_id);
1034
	}
905
	}
1035
906
1036
	/* These queues will not be used so allocate the minimum resources. */
1037
	for (/**/; i < sc->vtnet_max_vq_pairs; i++, idx += 2) {
1038
		rxq = &sc->vtnet_rxqs[i];
1039
		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, rxq, &rxq->vtnrx_vq,
1040
		    "%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);
1041
1042
		txq = &sc->vtnet_txqs[i];
1043
		VQ_ALLOC_INFO_INIT(&info[idx+1], 0, NULL, txq, &txq->vtntx_vq,
1044
		    "%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
1045
	}
1046
1047
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
907
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
1048
		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
908
		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
1049
		    &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
909
		    &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
1050
	}
910
	}
1051
911
1052
	/*
912
	/*
1053
	 * TODO: Enable interrupt binding if this is multiqueue. This will
913
	 * Enable interrupt binding if this is multiqueue. This only matters
1054
	 * only matter when per-virtqueue MSIX is available.
914
	 * when per-vq MSIX is available.
1055
	 */
915
	 */
1056
	if (sc->vtnet_flags & VTNET_FLAG_MQ)
916
	if (sc->vtnet_flags & VTNET_FLAG_MULTIQ)
1057
		flags |= 0;
917
		flags |= 0;
1058
918
1059
	error = virtio_alloc_virtqueues(dev, flags, nvqs, info);
919
	error = virtio_alloc_virtqueues(dev, flags, nvqs, info);
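
The allocation loops above assume a fixed virtqueue index layout: queue pair i owns slot 2*i for rx and 2*i+1 for tx, pairs past the requested count still get minimally-sized placeholder entries so the indexing stays dense, and the control queue takes the final slot. In sketch form:

    #include <stdio.h>

    static int rx_vq_idx(int pair)          { return (2 * pair); }
    static int tx_vq_idx(int pair)          { return (2 * pair + 1); }
    static int ctrl_vq_idx(int max_pairs)   { return (2 * max_pairs); }

    int
    main(void)
    {
        /* With 2 queue pairs: rx0=0 tx0=1 rx1=2 tx1=3 ctrl=4. */
        printf("rx1=%d tx1=%d ctrl=%d\n",
            rx_vq_idx(1), tx_vq_idx(1), ctrl_vq_idx(2));
        return (0);
    }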
Lines 1063-1097 Link Here
1063
}
923
}
1064
924
1065
static int
925
static int
1066
vtnet_alloc_interface(struct vtnet_softc *sc)
926
vtnet_setup_interface(struct vtnet_softc *sc)
1067
{
927
{
1068
	device_t dev;
928
	device_t dev;
1069
	struct ifnet *ifp;
929
	struct ifnet *ifp;
1070
930
1071
	dev = sc->vtnet_dev;
931
	dev = sc->vtnet_dev;
1072
932
1073
	ifp = if_alloc(IFT_ETHER);
933
	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
1074
	if (ifp == NULL)
934
	if (ifp == NULL) {
1075
		return (ENOMEM);
935
		device_printf(dev, "cannot allocate ifnet structure\n");
936
		return (ENOSPC);
937
	}
1076
938
1077
	sc->vtnet_ifp = ifp;
1078
	ifp->if_softc = sc;
1079
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
939
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1080
940
	ifp->if_baudrate = IF_Gbps(10);	/* Approx. */
1081
	return (0);
941
	ifp->if_softc = sc;
1082
}
1083
1084
static int
1085
vtnet_setup_interface(struct vtnet_softc *sc)
1086
{
1087
	device_t dev;
1088
	struct ifnet *ifp;
1089
1090
	dev = sc->vtnet_dev;
1091
	ifp = sc->vtnet_ifp;
1092
1093
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
942
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1094
	ifp->if_baudrate = IF_Gbps(10);
1095
	ifp->if_init = vtnet_init;
943
	ifp->if_init = vtnet_init;
1096
	ifp->if_ioctl = vtnet_ioctl;
944
	ifp->if_ioctl = vtnet_ioctl;
1097
	ifp->if_get_counter = vtnet_get_counter;
945
	ifp->if_get_counter = vtnet_get_counter;
Lines 1106-1163 Link Here
1106
	IFQ_SET_READY(&ifp->if_snd);
954
	IFQ_SET_READY(&ifp->if_snd);
1107
#endif
955
#endif
1108
956
1109
	vtnet_get_macaddr(sc);
957
	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
958
	    vtnet_ifmedia_sts);
959
	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
960
	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);
1110
961
962
	/* Read (or generate) the MAC address for the adapter. */
963
	vtnet_get_hwaddr(sc);
964
965
	ether_ifattach(ifp, sc->vtnet_hwaddr);
966
1111
	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
967
	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
1112
		ifp->if_capabilities |= IFCAP_LINKSTATE;
968
		ifp->if_capabilities |= IFCAP_LINKSTATE;
1113
969
1114
	ifmedia_init(&sc->vtnet_media, 0, vtnet_ifmedia_upd, vtnet_ifmedia_sts);
970
	/* Tell the upper layer(s) we support long frames. */
1115
	ifmedia_add(&sc->vtnet_media, IFM_ETHER | IFM_AUTO, 0, NULL);
971
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1116
	ifmedia_set(&sc->vtnet_media, IFM_ETHER | IFM_AUTO);
972
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
1117
973
1118
	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
974
	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
1119
		int gso;
1120
1121
		ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
975
		ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
1122
976
1123
		gso = virtio_with_feature(dev, VIRTIO_NET_F_GSO);
977
		if (virtio_with_feature(dev, VIRTIO_NET_F_GSO)) {
1124
		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
978
			ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
1125
			ifp->if_capabilities |= IFCAP_TSO4;
1126
		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
1127
			ifp->if_capabilities |= IFCAP_TSO6;
1128
		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
1129
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
979
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
980
		} else {
981
			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
982
				ifp->if_capabilities |= IFCAP_TSO4;
983
			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
984
				ifp->if_capabilities |= IFCAP_TSO6;
985
			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
986
				sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
987
		}
1130
988
1131
		if (ifp->if_capabilities & (IFCAP_TSO4 | IFCAP_TSO6)) {
989
		if (ifp->if_capabilities & IFCAP_TSO)
1132
			int tso_maxlen;
1133
1134
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
990
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1135
1136
			tso_maxlen = vtnet_tunable_int(sc, "tso_maxlen",
1137
			    vtnet_tso_maxlen);
1138
			ifp->if_hw_tsomax = tso_maxlen -
1139
			    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1140
			ifp->if_hw_tsomaxsegcount = sc->vtnet_tx_nsegs - 1;
1141
			ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
1142
		}
1143
	}
991
	}
1144
992
1145
	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
993
	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
1146
		ifp->if_capabilities |= IFCAP_RXCSUM;
994
		ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
1147
#ifdef notyet
1148
		/* BMV: Rx checksums not distinguished between IPv4 and IPv6. */
1149
		ifp->if_capabilities |= IFCAP_RXCSUM_IPV6;
1150
#endif
1151
995
1152
		if (vtnet_tunable_int(sc, "fixup_needs_csum",
996
		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
1153
		    vtnet_fixup_needs_csum) != 0)
997
		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
1154
			sc->vtnet_flags |= VTNET_FLAG_FIXUP_NEEDS_CSUM;
998
			ifp->if_capabilities |= IFCAP_LRO;
1155
1156
		/* Support either "hardware" or software LRO. */
1157
		ifp->if_capabilities |= IFCAP_LRO;
1158
	}
999
	}
1159
1000
1160
	if (ifp->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6)) {
1001
	if (ifp->if_capabilities & IFCAP_HWCSUM) {
1161
		/*
1002
		/*
1162
		 * VirtIO does not support VLAN tagging, but we can fake
1003
		 * VirtIO does not support VLAN tagging, but we can fake
1163
		 * it by inserting and removing the 802.1Q header during
1004
		 * it by inserting and removing the 802.1Q header during
Lines 1168-1181 Link Here
1168
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
1009
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
1169
	}
1010
	}
1170
1011
1171
	if (sc->vtnet_max_mtu >= ETHERMTU_JUMBO)
1012
	ifp->if_capenable = ifp->if_capabilities;
1172
		ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1173
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1174
1013
1175
	/*
1014
	/*
1176
	 * Capabilities after here are not enabled by default.
1015
	 * Capabilities after here are not enabled by default.
1177
	 */
1016
	 */
1178
	ifp->if_capenable = ifp->if_capabilities;
1179
1017
1180
	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
1018
	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
1181
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1019
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
Lines 1186-1401 Link Here
1186
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
1024
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
1187
	}
1025
	}
1188
1026
1189
	ether_ifattach(ifp, sc->vtnet_hwaddr);
1027
	vtnet_set_rx_process_limit(sc);
1028
	vtnet_set_tx_intr_threshold(sc);
1190
1029
1191
	/* Tell the upper layer(s) we support long frames. */
1192
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1193
1194
	NETDUMP_SET(ifp, vtnet);
1030
	NETDUMP_SET(ifp, vtnet);
1195
1031
1196
	return (0);
1032
	return (0);
1197
}
1033
}
1198
1034
1199
static int
1035
static int
1200
vtnet_rx_cluster_size(struct vtnet_softc *sc, int mtu)
1036
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
1201
{
1037
{
1202
	int framesz;
1203
1204
	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
1205
		return (MJUMPAGESIZE);
1206
	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
1207
		return (MCLBYTES);
1208
1209
	/*
1210
	 * Try to scale the receive mbuf cluster size from the MTU. Without
1211
	 * the GUEST_TSO[46] features, the VirtIO specification says the
1212
	 * driver need only be able to receive ~1500 byte frames. But if
1213
	 * jumbo frames can be transmitted then try to receive jumbo.
1214
	 *
1215
	 * BMV: Not quite true when F_MTU is negotiated!
1216
	 */
1217
	if (vtnet_modern(sc)) {
1218
		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr_v1));
1219
		framesz = sizeof(struct virtio_net_hdr_v1);
1220
	} else
1221
		framesz = sizeof(struct vtnet_rx_header);
1222
	framesz += sizeof(struct ether_vlan_header) + mtu;
1223
1224
	if (framesz <= MCLBYTES)
1225
		return (MCLBYTES);
1226
	else if (framesz <= MJUMPAGESIZE)
1227
		return (MJUMPAGESIZE);
1228
	else if (framesz <= MJUM9BYTES)
1229
		return (MJUM9BYTES);
1230
1231
	/* Sane default; avoid 16KB clusters. */
1232
	return (MCLBYTES);
1233
}
1234
1235
static int
1236
vtnet_ioctl_mtu(struct vtnet_softc *sc, int mtu)
1237
{
1238
	struct ifnet *ifp;
1038
	struct ifnet *ifp;
1239
	int clustersz;
1039
	int frame_size, clsize;
1240
1040
1241
	ifp = sc->vtnet_ifp;
1041
	ifp = sc->vtnet_ifp;
1242
	VTNET_CORE_LOCK_ASSERT(sc);
1243
1042
1244
	if (ifp->if_mtu == mtu)
1043
	if (new_mtu < ETHERMIN || new_mtu > VTNET_MAX_MTU)
1245
		return (0);
1246
	else if (mtu < ETHERMIN || mtu > sc->vtnet_max_mtu)
1247
		return (EINVAL);
1044
		return (EINVAL);
1248
1045
1249
	ifp->if_mtu = mtu;
1046
	frame_size = sc->vtnet_hdr_size + sizeof(struct ether_vlan_header) +
1250
	clustersz = vtnet_rx_cluster_size(sc, mtu);
1047
	    new_mtu;
1251
1048
1252
	if (clustersz != sc->vtnet_rx_clustersz &&
1049
	/*
1253
	    ifp->if_drv_flags & IFF_DRV_RUNNING) {
1050
	 * Based on the new MTU (and hence frame size) determine which
1254
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1051
	 * cluster size is most appropriate for the receive queues.
1255
		vtnet_init_locked(sc);
1052
	 */
1256
	}
1053
	if (frame_size <= MCLBYTES) {
1054
		clsize = MCLBYTES;
1055
	} else if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1056
		/* Avoid going past 9K jumbos. */
1057
		if (frame_size > MJUM9BYTES)
1058
			return (EINVAL);
1059
		clsize = MJUM9BYTES;
1060
	} else
1061
		clsize = MJUMPAGESIZE;
1257
1062
1258
	return (0);
1063
	ifp->if_mtu = new_mtu;
1259
}
1064
	sc->vtnet_rx_new_clsize = clsize;
1260
1065
1261
static int
1066
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1262
vtnet_ioctl_ifflags(struct vtnet_softc *sc)
1067
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1263
{
1264
	struct ifnet *ifp;
1265
	int drv_running;
1266
1267
	ifp = sc->vtnet_ifp;
1268
	drv_running = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1269
1270
	VTNET_CORE_LOCK_ASSERT(sc);
1271
1272
	if ((ifp->if_flags & IFF_UP) == 0) {
1273
		if (drv_running)
1274
			vtnet_stop(sc);
1275
		goto out;
1276
	}
1277
1278
	if (!drv_running) {
1279
		vtnet_init_locked(sc);
1068
		vtnet_init_locked(sc);
1280
		goto out;
1281
	}
1069
	}
1282
1070
1283
	if ((ifp->if_flags ^ sc->vtnet_if_flags) &
1284
	    (IFF_PROMISC | IFF_ALLMULTI)) {
1285
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
1286
			return (ENOTSUP);
1287
		vtnet_rx_filter(sc);
1288
	}
1289
1290
out:
1291
	sc->vtnet_if_flags = ifp->if_flags;
1292
	return (0);
1071
	return (0);
1293
}
1072
}
1294
1073
1295
static int
1074
static int
1296
vtnet_ioctl_multi(struct vtnet_softc *sc)
1297
{
1298
	struct ifnet *ifp;
1299
1300
	ifp = sc->vtnet_ifp;
1301
1302
	VTNET_CORE_LOCK_ASSERT(sc);
1303
1304
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX &&
1305
	    ifp->if_drv_flags & IFF_DRV_RUNNING)
1306
		vtnet_rx_filter_mac(sc);
1307
1308
	return (0);
1309
}
1310
1311
static int
1312
vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr)
1313
{
1314
	struct ifnet *ifp;
1315
	int mask, reinit, update;
1316
1317
	ifp = sc->vtnet_ifp;
1318
	mask = (ifr->ifr_reqcap & ifp->if_capabilities) ^ ifp->if_capenable;
1319
	reinit = update = 0;
1320
1321
	VTNET_CORE_LOCK_ASSERT(sc);
1322
1323
	if (mask & IFCAP_TXCSUM)
1324
		ifp->if_capenable ^= IFCAP_TXCSUM;
1325
	if (mask & IFCAP_TXCSUM_IPV6)
1326
		ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1327
	if (mask & IFCAP_TSO4)
1328
		ifp->if_capenable ^= IFCAP_TSO4;
1329
	if (mask & IFCAP_TSO6)
1330
		ifp->if_capenable ^= IFCAP_TSO6;
1331
1332
	if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) {
1333
		/*
1334
		 * These Rx features require the negotiated features to
1335
		 * be updated. Avoid a full reinit if possible.
1336
		 */
1337
		if (sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
1338
			update = 1;
1339
		else
1340
			reinit = 1;
1341
1342
		/* BMV: Avoid needless renegotiation for just software LRO. */
1343
		if ((mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) ==
1344
		    IFCAP_LRO && vtnet_software_lro(sc))
1345
			reinit = update = 0;
1346
1347
		if (mask & IFCAP_RXCSUM)
1348
			ifp->if_capenable ^= IFCAP_RXCSUM;
1349
		if (mask & IFCAP_RXCSUM_IPV6)
1350
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1351
		if (mask & IFCAP_LRO)
1352
			ifp->if_capenable ^= IFCAP_LRO;
1353
1354
		/*
1355
		 * VirtIO does not distinguish between IPv4 and IPv6 checksums
1356
		 * so treat them as a pair. Guest TSO (LRO) requires receive
1357
		 * checksums.
1358
		 */
1359
		if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
1360
			ifp->if_capenable |= IFCAP_RXCSUM;
1361
#ifdef notyet
1362
			ifp->if_capenable |= IFCAP_RXCSUM_IPV6;
1363
#endif
1364
		} else
1365
			ifp->if_capenable &=
1366
			    ~(IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO);
1367
	}
1368
1369
	if (mask & IFCAP_VLAN_HWFILTER) {
1370
		/* These Rx features require renegotiation. */
1371
		reinit = 1;
1372
1373
		if (mask & IFCAP_VLAN_HWFILTER)
1374
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1375
	}
1376
1377
	if (mask & IFCAP_VLAN_HWTSO)
1378
		ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1379
	if (mask & IFCAP_VLAN_HWTAGGING)
1380
		ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1381
1382
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1383
		if (reinit) {
1384
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1385
			vtnet_init_locked(sc);
1386
		} else if (update)
1387
			vtnet_update_rx_offloads(sc);
1388
	}
1389
1390
	return (0);
1391
}
1392
1393
static int
1394
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1075
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1395
{
1076
{
1396
	struct vtnet_softc *sc;
1077
	struct vtnet_softc *sc;
1397
	struct ifreq *ifr;
1078
	struct ifreq *ifr;
1398
	int error;
1079
	int reinit, mask, error;
1399
1080
1400
	sc = ifp->if_softc;
1081
	sc = ifp->if_softc;
1401
	ifr = (struct ifreq *) data;
1082
	ifr = (struct ifreq *) data;
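
The vtnet_rx_cluster_size() ladder earlier in this hunk picks the smallest standard cluster that fits the header plus the frame. A user-space rendering of the same arithmetic; the FreeBSD cluster-size constants are reproduced here only for illustration:

    #include <stdio.h>

    #define DEMO_MCLBYTES       2048
    #define DEMO_MJUMPAGESIZE   4096    /* PAGE_SIZE on most systems */
    #define DEMO_MJUM9BYTES     (9 * 1024)

    static int
    rx_cluster_size(int hdrsz, int mtu)
    {
        /* header + ethernet/VLAN framing + payload */
        int framesz = hdrsz + 18 + mtu;

        if (framesz <= DEMO_MCLBYTES)
            return (DEMO_MCLBYTES);
        if (framesz <= DEMO_MJUMPAGESIZE)
            return (DEMO_MJUMPAGESIZE);
        if (framesz <= DEMO_MJUM9BYTES)
            return (DEMO_MJUM9BYTES);
        return (DEMO_MCLBYTES); /* sane default; avoid 16KB clusters */
    }

    int
    main(void)
    {
        printf("%d\n", rx_cluster_size(12, 9000)); /* 9216 */
        return (0);
    }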
Lines 1403-1423 Link Here
1403
1084
1404
	switch (cmd) {
1085
	switch (cmd) {
1405
	case SIOCSIFMTU:
1086
	case SIOCSIFMTU:
1406
		VTNET_CORE_LOCK(sc);
1087
		if (ifp->if_mtu != ifr->ifr_mtu) {
1407
		error = vtnet_ioctl_mtu(sc, ifr->ifr_mtu);
1088
			VTNET_CORE_LOCK(sc);
1408
		VTNET_CORE_UNLOCK(sc);
1089
			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
1090
			VTNET_CORE_UNLOCK(sc);
1091
		}
1409
		break;
1092
		break;
1410
1093
1411
	case SIOCSIFFLAGS:
1094
	case SIOCSIFFLAGS:
1412
		VTNET_CORE_LOCK(sc);
1095
		VTNET_CORE_LOCK(sc);
1413
		error = vtnet_ioctl_ifflags(sc);
1096
		if ((ifp->if_flags & IFF_UP) == 0) {
1097
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1098
				vtnet_stop(sc);
1099
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1100
			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
1101
			    (IFF_PROMISC | IFF_ALLMULTI)) {
1102
				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
1103
					vtnet_rx_filter(sc);
1104
				else {
1105
					ifp->if_flags |= IFF_PROMISC;
1106
					if ((ifp->if_flags ^ sc->vtnet_if_flags)
1107
					    & IFF_ALLMULTI)
1108
						error = ENOTSUP;
1109
				}
1110
			}
1111
		} else
1112
			vtnet_init_locked(sc);
1113
1114
		if (error == 0)
1115
			sc->vtnet_if_flags = ifp->if_flags;
1414
		VTNET_CORE_UNLOCK(sc);
1116
		VTNET_CORE_UNLOCK(sc);
1415
		break;
1117
		break;
1416
1118
1417
	case SIOCADDMULTI:
1119
	case SIOCADDMULTI:
1418
	case SIOCDELMULTI:
1120
	case SIOCDELMULTI:
1121
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
1122
			break;
1419
		VTNET_CORE_LOCK(sc);
1123
		VTNET_CORE_LOCK(sc);
1420
		error = vtnet_ioctl_multi(sc);
1124
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1125
			vtnet_rx_filter_mac(sc);
1421
		VTNET_CORE_UNLOCK(sc);
1126
		VTNET_CORE_UNLOCK(sc);
1422
		break;
1127
		break;
1423
1128
Lines 1428-1436 Link Here
1428
1133
1429
	case SIOCSIFCAP:
1134
	case SIOCSIFCAP:
1430
		VTNET_CORE_LOCK(sc);
1135
		VTNET_CORE_LOCK(sc);
1431
		error = vtnet_ioctl_ifcap(sc, ifr);
1136
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1137
1138
		if (mask & IFCAP_TXCSUM)
1139
			ifp->if_capenable ^= IFCAP_TXCSUM;
1140
		if (mask & IFCAP_TXCSUM_IPV6)
1141
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1142
		if (mask & IFCAP_TSO4)
1143
			ifp->if_capenable ^= IFCAP_TSO4;
1144
		if (mask & IFCAP_TSO6)
1145
			ifp->if_capenable ^= IFCAP_TSO6;
1146
1147
		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
1148
		    IFCAP_VLAN_HWFILTER)) {
1149
			/* These Rx features require us to renegotiate. */
1150
			reinit = 1;
1151
1152
			if (mask & IFCAP_RXCSUM)
1153
				ifp->if_capenable ^= IFCAP_RXCSUM;
1154
			if (mask & IFCAP_RXCSUM_IPV6)
1155
				ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1156
			if (mask & IFCAP_LRO)
1157
				ifp->if_capenable ^= IFCAP_LRO;
1158
			if (mask & IFCAP_VLAN_HWFILTER)
1159
				ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1160
		} else
1161
			reinit = 0;
1162
1163
		if (mask & IFCAP_VLAN_HWTSO)
1164
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1165
		if (mask & IFCAP_VLAN_HWTAGGING)
1166
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1167
1168
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1169
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1170
			vtnet_init_locked(sc);
1171
		}
1172
1432
		VTNET_CORE_UNLOCK(sc);
1173
		VTNET_CORE_UNLOCK(sc);
1433
		VLAN_CAPABILITIES(ifp);
1174
		VLAN_CAPABILITIES(ifp);
1175
1434
		break;
1176
		break;
1435
1177
1436
	default:
1178
	default:
Lines 1449-1454 Link Here
1449
	struct virtqueue *vq;
1191
	struct virtqueue *vq;
1450
	int nbufs, error;
1192
	int nbufs, error;
1451
1193
1194
#ifdef DEV_NETMAP
1195
	error = vtnet_netmap_rxq_populate(rxq);
1196
	if (error >= 0)
1197
		return (error);
1198
#endif  /* DEV_NETMAP */
1199
1452
	vq = rxq->vtnrx_vq;
1200
	vq = rxq->vtnrx_vq;
1453
	error = ENOSPC;
1201
	error = ENOSPC;
1454
1202
Lines 1478-1489 Link Here
1478
	struct virtqueue *vq;
1226
	struct virtqueue *vq;
1479
	struct mbuf *m;
1227
	struct mbuf *m;
1480
	int last;
1228
	int last;
1229
#ifdef DEV_NETMAP
1230
	int netmap_bufs = vtnet_netmap_queue_on(rxq->vtnrx_sc, NR_RX,
1231
						rxq->vtnrx_id);
1232
#else  /* !DEV_NETMAP */
1233
	int netmap_bufs = 0;
1234
#endif /* !DEV_NETMAP */
1481
1235
1482
	vq = rxq->vtnrx_vq;
1236
	vq = rxq->vtnrx_vq;
1483
	last = 0;
1237
	last = 0;
1484
1238
1485
	while ((m = virtqueue_drain(vq, &last)) != NULL)
1239
	while ((m = virtqueue_drain(vq, &last)) != NULL) {
1486
		m_freem(m);
1240
		if (!netmap_bufs)
1241
			m_freem(m);
1242
	}
1487
1243
1488
	KASSERT(virtqueue_empty(vq),
1244
	KASSERT(virtqueue_empty(vq),
1489
	    ("%s: mbufs remaining in rx queue %p", __func__, rxq));
1245
	    ("%s: mbufs remaining in rx queue %p", __func__, rxq));
Lines 1493-1541 Link Here
1493
vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
1249
vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
1494
{
1250
{
1495
	struct mbuf *m_head, *m_tail, *m;
1251
	struct mbuf *m_head, *m_tail, *m;
1496
	int i, size;
1252
	int i, clsize;
1497
1253
1498
	m_head = NULL;
1254
	clsize = sc->vtnet_rx_clsize;
1499
	size = sc->vtnet_rx_clustersz;
1500
1255
1501
	KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1256
	KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1502
	    ("%s: mbuf %d chain requested without LRO_NOMRG", __func__, nbufs));
1257
	    ("%s: chained mbuf %d request without LRO_NOMRG", __func__, nbufs));
1503
1258
1504
	for (i = 0; i < nbufs; i++) {
1259
	m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize);
1505
		m = m_getjcl(M_NOWAIT, MT_DATA, i == 0 ? M_PKTHDR : 0, size);
1260
	if (m_head == NULL)
1506
		if (m == NULL) {
1261
		goto fail;
1507
			sc->vtnet_stats.mbuf_alloc_failed++;
1508
			m_freem(m_head);
1509
			return (NULL);
1510
		}
1511
1262
1512
		m->m_len = size;
1263
	m_head->m_len = clsize;
1513
		if (m_head != NULL) {
1264
	m_tail = m_head;
1514
			m_tail->m_next = m;
1265
1515
			m_tail = m;
1266
	/* Allocate the rest of the chain. */
1516
		} else
1267
	for (i = 1; i < nbufs; i++) {
1517
			m_head = m_tail = m;
1268
		m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize);
1269
		if (m == NULL)
1270
			goto fail;
1271
1272
		m->m_len = clsize;
1273
		m_tail->m_next = m;
1274
		m_tail = m;
1518
	}
1275
	}
1519
1276
1520
	if (m_tailp != NULL)
1277
	if (m_tailp != NULL)
1521
		*m_tailp = m_tail;
1278
		*m_tailp = m_tail;
1522
1279
1523
	return (m_head);
1280
	return (m_head);
1281
1282
fail:
1283
	sc->vtnet_stats.mbuf_alloc_failed++;
1284
	m_freem(m_head);
1285
1286
	return (NULL);
1524
}
1287
}
1525
1288
1526
/*
1289
/*
1527
 * Slow path for when LRO without mergeable buffers is negotiated.
1290
 * Slow path for when LRO without mergeable buffers is negotiated.
1528
 */
1291
 */
1529
static int
1292
static int
1530
vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
1293
vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
1531
    int len0)
1294
    int len0)
1532
{
1295
{
1533
	struct vtnet_softc *sc;
1296
	struct vtnet_softc *sc;
1534
	struct mbuf *m, *m_prev, *m_new, *m_tail;
1297
	struct mbuf *m, *m_prev;
1535
	int len, clustersz, nreplace, error;
1298
	struct mbuf *m_new, *m_tail;
1299
	int len, clsize, nreplace, error;
1536
1300
1537
	sc = rxq->vtnrx_sc;
1301
	sc = rxq->vtnrx_sc;
1538
	clustersz = sc->vtnet_rx_clustersz;
1302
	clsize = sc->vtnet_rx_clsize;
1539
1303
1540
	m_prev = NULL;
1304
	m_prev = NULL;
1541
	m_tail = NULL;
1305
	m_tail = NULL;
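
The reworked vtnet_rx_alloc_buf() earlier in this hunk folds the head and continuation allocations into a single loop (the i == 0 ? M_PKTHDR : 0 flag choice) with one unwind path. The same shape in portable C, with calloc() standing in for m_getjcl():

    #include <stdlib.h>

    struct demo_buf {
        struct demo_buf *next;
        char             data[2048];
    };

    static struct demo_buf *
    alloc_chain(int nbufs, struct demo_buf **tailp)
    {
        struct demo_buf *head = NULL, *tail = NULL;
        int i;

        for (i = 0; i < nbufs; i++) {
            struct demo_buf *b = calloc(1, sizeof(*b));

            if (b == NULL) {    /* unwind everything on failure */
                while (head != NULL) {
                    struct demo_buf *n = head->next;
                    free(head);
                    head = n;
                }
                return (NULL);
            }
            if (head != NULL) {
                tail->next = b;
                tail = b;
            } else
                head = tail = b;
        }
        if (tailp != NULL)
            *tailp = tail;
        return (head);
    }

    int
    main(void)
    {
        struct demo_buf *tail, *head = alloc_chain(3, &tail);

        return (head == NULL);
    }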
Lines 1545-1567 Link Here
1545
	len = len0;
1309
	len = len0;
1546
1310
1547
	/*
1311
	/*
1548
	 * Since these mbuf chains are so large, avoid allocating a complete
1312
	 * Since these mbuf chains are so large, we avoid allocating an
1549
	 * replacement when the received frame did not consume the entire
1313
	 * entire replacement chain if possible. When the received frame
1550
	 * chain. Unused mbufs are moved to the tail of the replacement chain.
1314
	 * did not consume the entire chain, the unused mbufs are moved
1315
	 * to the replacement chain.
1551
	 */
1316
	 */
1552
	while (len > 0) {
1317
	while (len > 0) {
1318
		/*
1319
		 * Something is seriously wrong if we received a frame
1320
		 * larger than the chain. Drop it.
1321
		 */
1553
		if (m == NULL) {
1322
		if (m == NULL) {
1554
			sc->vtnet_stats.rx_frame_too_large++;
1323
			sc->vtnet_stats.rx_frame_too_large++;
1555
			return (EMSGSIZE);
1324
			return (EMSGSIZE);
1556
		}
1325
		}
1557
1326
1558
		/*
1327
		/* We always allocate the same cluster size. */
1559
		 * Every mbuf should have the expected cluster size since that
1328
		KASSERT(m->m_len == clsize,
1560
		 * is also used to allocate the replacements.
1329
		    ("%s: mbuf size %d is not the cluster size %d",
1561
		 */
1330
		    __func__, m->m_len, clsize));
1562
		KASSERT(m->m_len == clustersz,
1563
		    ("%s: mbuf size %d not expected cluster size %d", __func__,
1564
		    m->m_len, clustersz));
1565
1331
1566
		m->m_len = MIN(m->m_len, len);
1332
		m->m_len = MIN(m->m_len, len);
1567
		len -= m->m_len;
1333
		len -= m->m_len;
Lines 1571-1589 Link Here
1571
		nreplace++;
1337
		nreplace++;
1572
	}
1338
	}
1573
1339
1574
	KASSERT(nreplace > 0 && nreplace <= sc->vtnet_rx_nmbufs,
1340
	KASSERT(nreplace <= sc->vtnet_rx_nmbufs,
1575
	    ("%s: invalid replacement mbuf count %d max %d", __func__,
1341
	    ("%s: too many replacement mbufs %d max %d", __func__, nreplace,
1576
	    nreplace, sc->vtnet_rx_nmbufs));
1342
	    sc->vtnet_rx_nmbufs));
1577
1343
1578
	m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
1344
	m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
1579
	if (m_new == NULL) {
1345
	if (m_new == NULL) {
1580
		m_prev->m_len = clustersz;
1346
		m_prev->m_len = clsize;
1581
		return (ENOBUFS);
1347
		return (ENOBUFS);
1582
	}
1348
	}
1583
1349
1584
	/*
1350
	/*
1585
	 * Move any unused mbufs from the received mbuf chain onto the
1351
	 * Move any unused mbufs from the received chain onto the end
1586
	 * end of the replacement chain.
1352
	 * of the new chain.
1587
	 */
1353
	 */
1588
	if (m_prev->m_next != NULL) {
1354
	if (m_prev->m_next != NULL) {
1589
		m_tail->m_next = m_prev->m_next;
1355
		m_tail->m_next = m_prev->m_next;
Lines 1593-1610 Link Here
1593
	error = vtnet_rxq_enqueue_buf(rxq, m_new);
1359
	error = vtnet_rxq_enqueue_buf(rxq, m_new);
1594
	if (error) {
1360
	if (error) {
1595
		/*
1361
		/*
1596
		 * The replacement is supposed to be a copy of the one
1362
		 * BAD! We could not enqueue the replacement mbuf chain. We
1597
		 * dequeued so this is a very unexpected error.
1363
		 * must restore the m0 chain to the original state if it was
1364
		 * modified so we can subsequently discard it.
1598
		 *
1365
		 *
1599
		 * Restore the m0 chain to the original state if it was
1366
		 * NOTE: The replacement is supposed to be an identical copy
1600
		 * modified so we can then discard it.
1367
		 * of the one just dequeued so this is an unexpected error.
1601
		 */
1368
		 */
1369
		sc->vtnet_stats.rx_enq_replacement_failed++;
1370
1602
		if (m_tail->m_next != NULL) {
1371
		if (m_tail->m_next != NULL) {
1603
			m_prev->m_next = m_tail->m_next;
1372
			m_prev->m_next = m_tail->m_next;
1604
			m_tail->m_next = NULL;
1373
			m_tail->m_next = NULL;
1605
		}
1374
		}
1606
		m_prev->m_len = clustersz;
1375
1607
		sc->vtnet_stats.rx_enq_replacement_failed++;
1376
		m_prev->m_len = clsize;
1608
		m_freem(m_new);
1377
		m_freem(m_new);
1609
	}
1378
	}
1610
1379
Lines 1620-1642 Link Here
1620
1389
1621
	sc = rxq->vtnrx_sc;
1390
	sc = rxq->vtnrx_sc;
1622
1391
1623
	if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
1392
	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
1624
		return (vtnet_rxq_replace_lro_nomrg_buf(rxq, m, len));
1393
	    ("%s: chained mbuf without LRO_NOMRG", __func__));
1625
1394
1626
	MPASS(m->m_next == NULL);
1395
	if (m->m_next == NULL) {
1627
	if (m->m_len < len)
1396
		/* Fast-path for the common case of just one mbuf. */
1628
		return (EMSGSIZE);
1397
		if (m->m_len < len)
1398
			return (EINVAL);
1629
1399
1630
	m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
1400
		m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
1631
	if (m_new == NULL)
1401
		if (m_new == NULL)
1632
		return (ENOBUFS);
1402
			return (ENOBUFS);
1633
1403
1634
	error = vtnet_rxq_enqueue_buf(rxq, m_new);
1404
		error = vtnet_rxq_enqueue_buf(rxq, m_new);
1635
	if (error) {
1405
		if (error) {
1636
		sc->vtnet_stats.rx_enq_replacement_failed++;
1406
			/*
1637
		m_freem(m_new);
1407
			 * The new mbuf is supposed to be an identical
1408
			 * copy of the one just dequeued so this is an
1409
			 * unexpected error.
1410
			 */
1411
			m_freem(m_new);
1412
			sc->vtnet_stats.rx_enq_replacement_failed++;
1413
		} else
1414
			m->m_len = len;
1638
	} else
1415
	} else
1639
		m->m_len = len;
1416
		error = vtnet_rxq_replace_lro_nomgr_buf(rxq, m, len);
1640
1417
1641
	return (error);
1418
	return (error);
1642
}
1419
}
Lines 1646-1688 Link Here
1646
{
1423
{
1647
	struct vtnet_softc *sc;
1424
	struct vtnet_softc *sc;
1648
	struct sglist *sg;
1425
	struct sglist *sg;
1649
	int header_inlined, error;
1426
	struct vtnet_rx_header *rxhdr;
1427
	uint8_t *mdata;
1428
	int offset, error;
1650
1429
1651
	sc = rxq->vtnrx_sc;
1430
	sc = rxq->vtnrx_sc;
1652
	sg = rxq->vtnrx_sg;
1431
	sg = rxq->vtnrx_sg;
1432
	mdata = mtod(m, uint8_t *);
1653
1433
1654
	KASSERT(m->m_next == NULL || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1655
	    ("%s: mbuf chain without LRO_NOMRG", __func__));
1656
	VTNET_RXQ_LOCK_ASSERT(rxq);
1434
	VTNET_RXQ_LOCK_ASSERT(rxq);
1435
	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
1436
	    ("%s: chained mbuf without LRO_NOMRG", __func__));
1437
	KASSERT(m->m_len == sc->vtnet_rx_clsize,
1438
	    ("%s: unexpected cluster size %d/%d", __func__, m->m_len,
1439
	     sc->vtnet_rx_clsize));
1657
1440
1658
	sglist_reset(sg);
1441
	sglist_reset(sg);
1659
	header_inlined = vtnet_modern(sc) ||
1442
	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1660
	    (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) != 0; /* TODO: ANY_LAYOUT */
1661
1662
	if (header_inlined)
1663
		error = sglist_append_mbuf(sg, m);
1664
	else {
1665
		struct vtnet_rx_header *rxhdr =
1666
		    mtod(m, struct vtnet_rx_header *);
1667
		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));
1443
		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));
1444
		rxhdr = (struct vtnet_rx_header *) mdata;
1445
		sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
1446
		offset = sizeof(struct vtnet_rx_header);
1447
	} else
1448
		offset = 0;
1668
1449
1669
		/* Append the header and remaining mbuf data. */
1450
	sglist_append(sg, mdata + offset, m->m_len - offset);
1670
		error = sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
1451
	if (m->m_next != NULL) {
1671
		if (error)
1452
		error = sglist_append_mbuf(sg, m->m_next);
1672
			return (error);
1453
		MPASS(error == 0);
1673
		error = sglist_append(sg, &rxhdr[1],
1674
		    m->m_len - sizeof(struct vtnet_rx_header));
1675
		if (error)
1676
			return (error);
1677
1678
		if (m->m_next != NULL)
1679
			error = sglist_append_mbuf(sg, m->m_next);
1680
	}
1454
	}
1681
1455
1682
	if (error)
1456
	error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg);
1683
		return (error);
1684
1457
1685
	return (virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg));
1458
	return (error);
1686
}
1459
}
1687
1460
1688
static int
1461
static int
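
For the enqueue path above: with an inlined header (modern device or mergeable buffers) the whole cluster maps as one scatter segment; otherwise segment 0 covers only the virtio header inside the padded receive header, and segment 1 the rest of the cluster. A sketch; the pad and header sizes are illustrative:

    #include <stddef.h>

    struct demo_seg {
        char    *addr;
        size_t   len;
    };

    /* Returns the number of segments written into sg[0..1]. */
    static int
    build_rx_sg(struct demo_seg *sg, char *cluster, size_t clustersz,
        size_t hdrsz, size_t pad, int header_inlined)
    {
        if (header_inlined) {
            sg[0].addr = cluster;   /* header + data, one segment */
            sg[0].len = clustersz;
            return (1);
        }
        sg[0].addr = cluster;       /* just the virtio header */
        sg[0].len = hdrsz;
        sg[1].addr = cluster + pad; /* data begins after the padding */
        sg[1].len = clustersz - pad;
        return (2);
    }

    int
    main(void)
    {
        struct demo_seg sg[2];
        char cluster[2048];

        return (build_rx_sg(sg, cluster, sizeof(cluster), 10, 16, 0) != 2);
    }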
Lines 1705-1777 Link Here
1705
	return (error);
1478
	return (error);
1706
}
1479
}
1707
1480
1481
/*
1482
 * Use the checksum offset in the VirtIO header to set the
1483
 * correct CSUM_* flags.
1484
 */
1708
static int
1485
static int
1709
vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t etype,
1486
vtnet_rxq_csum_by_offset(struct vtnet_rxq *rxq, struct mbuf *m,
1710
    int hoff, struct virtio_net_hdr *hdr)
1487
    uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
1711
{
1488
{
1712
	struct vtnet_softc *sc;
1489
	struct vtnet_softc *sc;
1713
	int error;
1490
#if defined(INET) || defined(INET6)
1491
	int offset = hdr->csum_start + hdr->csum_offset;
1492
#endif
1714
1493
1715
	sc = rxq->vtnrx_sc;
1494
	sc = rxq->vtnrx_sc;
1716
1495
1717
	/*
1496
	/* Only do a basic sanity check on the offset. */
1718
	 * NEEDS_CSUM corresponds to Linux's CHECKSUM_PARTIAL, but FreeBSD does
1497
	switch (eth_type) {
1719
	 * not have an analogous CSUM flag. The checksum has been validated,
1498
#if defined(INET)
1720
	 * but is incomplete (TCP/UDP pseudo header).
1499
	case ETHERTYPE_IP:
1721
	 *
1500
		if (__predict_false(offset < ip_start + sizeof(struct ip)))
1722
	 * The packet is likely from another VM on the same host that itself
1501
			return (1);
1723
	 * performed checksum offloading so Tx/Rx is basically a memcpy and
1502
		break;
1724
	 * the checksum has little value.
1503
#endif
1725
	 *
1504
#if defined(INET6)
1726
	 * Default to receiving the packet as-is for performance reasons, but
1505
	case ETHERTYPE_IPV6:
1727
	 * this can cause issues if the packet is to be forwarded because it
1506
		if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
1728
	 * does not contain a valid checksum. This patch may be helpful:
1507
			return (1);
1729
	 * https://reviews.freebsd.org/D6611. In the meantime, have the driver
1508
		break;
1730
	 * compute the checksum if requested.
1509
#endif
1731
	 *
1510
	default:
1732
	 * BMV: Need to add an CSUM_PARTIAL flag?
1511
		sc->vtnet_stats.rx_csum_bad_ethtype++;
1733
	 */
1512
		return (1);
1734
	if ((sc->vtnet_flags & VTNET_FLAG_FIXUP_NEEDS_CSUM) == 0) {
1735
		error = vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr);
1736
		return (error);
1737
	}
1513
	}
1738
1514
1739
	/*
1515
	/*
1740
	 * Compute the checksum in the driver so the packet will contain a
1516
	 * Use the offset to determine the appropriate CSUM_* flags. This is
1741
	 * valid checksum. The checksum is at csum_offset from csum_start.
1517
	 * a bit dirty, but we can get by with it since the checksum offsets
1518
	 * happen to be different. We assume the host host does not do IPv4
1519
	 * header checksum offloading.
1742
	 */
1520
	 */
1743
	switch (etype) {
1521
	switch (hdr->csum_offset) {
1744
#if defined(INET) || defined(INET6)
1522
	case offsetof(struct udphdr, uh_sum):
1745
	case ETHERTYPE_IP:
1523
	case offsetof(struct tcphdr, th_sum):
1746
	case ETHERTYPE_IPV6: {
1747
		int csum_off, csum_end;
1748
		uint16_t csum;
1749
1750
		csum_off = hdr->csum_start + hdr->csum_offset;
1751
		csum_end = csum_off + sizeof(uint16_t);
1752
1753
		/* Assume checksum will be in the first mbuf. */
1754
		if (m->m_len < csum_end || m->m_pkthdr.len < csum_end)
1755
			return (1);
1756
1757
		/*
1758
		 * Like in_delayed_cksum()/in6_delayed_cksum(), compute the
1759
		 * checksum and write it at the specified offset. We could
1760
		 * try to verify the packet: csum_start should probably
1761
		 * correspond to the start of the TCP/UDP header.
1762
		 *
1763
		 * BMV: Need to properly handle UDP with zero checksum. Is
1764
		 * the IPv4 header checksum implicitly validated?
1765
		 */
1766
		csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start);
1767
		*(uint16_t *)(mtodo(m, csum_off)) = csum;
1768
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1524
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1769
		m->m_pkthdr.csum_data = 0xFFFF;
1525
		m->m_pkthdr.csum_data = 0xFFFF;
1770
		break;
1526
		break;
1771
	}
1772
#endif
1773
	default:
1527
	default:
1774
		sc->vtnet_stats.rx_csum_bad_ethtype++;
1528
		sc->vtnet_stats.rx_csum_bad_offset++;
1775
		return (1);
1529
		return (1);
1776
	}
1530
	}
1777
1531
Lines 1779-1833 Link Here
1779
}
1533
}
1780
1534
1781
static int
1535
static int
1782
vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m,
1536
vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m,
1783
    uint16_t etype, int hoff, struct virtio_net_hdr *hdr)
1537
    uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
1784
{
1538
{
1785
	struct vtnet_softc *sc;
1539
	struct vtnet_softc *sc;
1786
	int protocol;
1540
	int offset, proto;
1787
1541
1788
	sc = rxq->vtnrx_sc;
1542
	sc = rxq->vtnrx_sc;
1789
1543
1790
	switch (etype) {
1544
	switch (eth_type) {
1791
#if defined(INET)
1545
#if defined(INET)
1792
	case ETHERTYPE_IP:
1546
	case ETHERTYPE_IP: {
1793
		if (__predict_false(m->m_len < hoff + sizeof(struct ip)))
1547
		struct ip *ip;
1794
			protocol = IPPROTO_DONE;
1548
		if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
1795
		else {
1549
			return (1);
1796
			struct ip *ip = (struct ip *)(m->m_data + hoff);
1550
		ip = (struct ip *)(m->m_data + ip_start);
1797
			protocol = ip->ip_p;
1551
		proto = ip->ip_p;
1798
		}
1552
		offset = ip_start + (ip->ip_hl << 2);
1799
		break;
1553
		break;
1554
	}
1800
#endif
1555
#endif
1801
#if defined(INET6)
1556
#if defined(INET6)
1802
	case ETHERTYPE_IPV6:
1557
	case ETHERTYPE_IPV6:
1803
		if (__predict_false(m->m_len < hoff + sizeof(struct ip6_hdr))
1558
		if (__predict_false(m->m_len < ip_start +
1804
		    || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0)
1559
		    sizeof(struct ip6_hdr)))
1805
			protocol = IPPROTO_DONE;
1560
			return (1);
1561
		offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
1562
		if (__predict_false(offset < 0))
1563
			return (1);
1806
		break;
1564
		break;
1807
#endif
1565
#endif
1808
	default:
1566
	default:
1809
		protocol = IPPROTO_DONE;
1567
		sc->vtnet_stats.rx_csum_bad_ethtype++;
1810
		break;
1568
		return (1);
1811
	}
1569
	}
1812
1570
1813
	switch (protocol) {
1571
	switch (proto) {
1814
	case IPPROTO_TCP:
1572
	case IPPROTO_TCP:
1573
		if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
1574
			return (1);
1575
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1576
		m->m_pkthdr.csum_data = 0xFFFF;
1577
		break;
1815
	case IPPROTO_UDP:
1578
	case IPPROTO_UDP:
1579
		if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
1580
			return (1);
1816
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1581
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1817
		m->m_pkthdr.csum_data = 0xFFFF;
1582
		m->m_pkthdr.csum_data = 0xFFFF;
1818
		break;
1583
		break;
1819
	default:
1584
	default:
1820
		/*
1585
		/*
1821
		 * FreeBSD does not support checksum offloading of this
1586
		 * For the remaining protocols, FreeBSD does not support
1822
		 * protocol. Let the stack re-verify the checksum later
1587
		 * checksum offloading, so the checksum will be recomputed.
1823
		 * if the protocol is supported.
1824
		 */
1588
		 */
1825
#if 0
1589
#if 0
1826
		if_printf(sc->vtnet_ifp,
1590
		if_printf(sc->vtnet_ifp, "cksum offload of unsupported "
1827
		    "%s: checksum offload of unsupported protocol "
1591
		    "protocol eth_type=%#x proto=%d csum_start=%d "
1828
		    "etype=%#x protocol=%d csum_start=%d csum_offset=%d\n",
1592
		    "csum_offset=%d\n", __func__, eth_type, proto,
1829
		    __func__, etype, protocol, hdr->csum_start,
1593
		    hdr->csum_start, hdr->csum_offset);
1830
		    hdr->csum_offset);
1831
#endif
1594
#endif
1832
		break;
1595
		break;
1833
	}
1596
	}
Lines 1835-1863 Link Here
1835
	return (0);
1598
	return (0);
1836
}
1599
}
1837
1600
1601
/*
1602
 * Set the appropriate CSUM_* flags. Unfortunately, the information
1603
 * provided is not directly useful to us. The VirtIO header gives the
1604
 * offset of the checksum, which is all Linux needs, but this is not
1605
 * how FreeBSD does things. We are forced to peek inside the packet
1606
 * a bit.
1607
 *
1608
 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD
1609
 * could accept the offsets and let the stack figure it out.
1610
 */
1838
static int
1611
static int
1839
vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
1612
vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
1840
    struct virtio_net_hdr *hdr)
1613
    struct virtio_net_hdr *hdr)
1841
{
1614
{
1842
	const struct ether_header *eh;
1615
	struct ether_header *eh;
1843
	int hoff;
1616
	struct ether_vlan_header *evh;
1844
	uint16_t etype;
1617
	uint16_t eth_type;
1618
	int offset, error;
1845
1619
1846
	eh = mtod(m, const struct ether_header *);
1620
	eh = mtod(m, struct ether_header *);
1847
	etype = ntohs(eh->ether_type);
1621
	eth_type = ntohs(eh->ether_type);
1848
	if (etype == ETHERTYPE_VLAN) {
1622
	if (eth_type == ETHERTYPE_VLAN) {
1849
		/* TODO BMV: Handle QinQ. */
1623
		/* BMV: We should handle nested VLAN tags too. */
1850
		const struct ether_vlan_header *evh =
1624
		evh = mtod(m, struct ether_vlan_header *);
1851
		    mtod(m, const struct ether_vlan_header *);
1625
		eth_type = ntohs(evh->evl_proto);
1852
		etype = ntohs(evh->evl_proto);
1626
		offset = sizeof(struct ether_vlan_header);
1853
		hoff = sizeof(struct ether_vlan_header);
1854
	} else
1627
	} else
1855
		hoff = sizeof(struct ether_header);
1628
		offset = sizeof(struct ether_header);
1856
1629
1857
	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1630
	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1858
		return (vtnet_rxq_csum_needs_csum(rxq, m, etype, hoff, hdr));
1631
		error = vtnet_rxq_csum_by_offset(rxq, m, eth_type, offset, hdr);
1859
	else /* VIRTIO_NET_HDR_F_DATA_VALID */
1632
	else
1860
		return (vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr));
1633
		error = vtnet_rxq_csum_by_parse(rxq, m, eth_type, offset, hdr);
1634
1635
	return (error);
1861
}
1636
}
1862
1637
1863
static void
1638
static void
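
Both checksum paths above first locate the L3 header by skipping at most one 802.1Q tag, which is where the "TODO BMV: Handle QinQ" note applies. The header walk in isolation; the demo_* structs are simplified stand-ins for the net/ethernet.h definitions:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    #define DEMO_ETHERTYPE_VLAN 0x8100

    struct demo_ether_header {
        uint8_t     dhost[6], shost[6];
        uint16_t    ether_type;
    };

    struct demo_ether_vlan_header {
        uint8_t     dhost[6], shost[6];
        uint16_t    encap_proto, tag, proto;
    };

    /* Return the real ethertype and set *hoff to the L3 header offset. */
    static uint16_t
    demo_l3_offset(const void *frame, int *hoff)
    {
        const struct demo_ether_header *eh = frame;

        if (ntohs(eh->ether_type) == DEMO_ETHERTYPE_VLAN) {
            const struct demo_ether_vlan_header *evh = frame;

            *hoff = sizeof(*evh);   /* 18 bytes with the tag */
            return (ntohs(evh->proto));
        }
        *hoff = sizeof(*eh);        /* plain 14-byte header */
        return (ntohs(eh->ether_type));
    }

    int
    main(void)
    {
        struct demo_ether_header eh = { .ether_type = htons(0x0800) };
        int hoff;

        printf("type %#x at offset %d\n", demo_l3_offset(&eh, &hoff), hoff);
        return (0);
    }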
Lines 1892-1907 Link Here
1892
{
1667
{
1893
	struct vtnet_softc *sc;
1668
	struct vtnet_softc *sc;
1894
	struct virtqueue *vq;
1669
	struct virtqueue *vq;
1895
	struct mbuf *m_tail;
1670
	struct mbuf *m, *m_tail;
1671
	int len;
1896
1672
1897
	sc = rxq->vtnrx_sc;
1673
	sc = rxq->vtnrx_sc;
1898
	vq = rxq->vtnrx_vq;
1674
	vq = rxq->vtnrx_vq;
1899
	m_tail = m_head;
1675
	m_tail = m_head;
1900
1676
1901
	while (--nbufs > 0) {
1677
	while (--nbufs > 0) {
1902
		struct mbuf *m;
1903
		int len;
1904
1905
		m = virtqueue_dequeue(vq, &len);
1678
		m = virtqueue_dequeue(vq, &len);
1906
		if (m == NULL) {
1679
		if (m == NULL) {
1907
			rxq->vtnrx_stats.vrxs_ierrors++;
1680
			rxq->vtnrx_stats.vrxs_ierrors++;
Lines 1936-1970 Link Here
1936
	return (1);
1709
	return (1);
1937
}
1710
}
1938
1711
1939
#if defined(INET) || defined(INET6)
1940
static int
1941
vtnet_lro_rx(struct vtnet_rxq *rxq, struct mbuf *m)
1942
{
1943
	struct lro_ctrl *lro;
1944
1945
	lro = &rxq->vtnrx_lro;
1946
1947
	if (lro->lro_mbuf_max != 0) {
1948
		tcp_lro_queue_mbuf(lro, m);
1949
		return (0);
1950
	}
1951
1952
	return (tcp_lro_rx(lro, m, 0));
1953
}
1954
#endif
1955
1956
static void
1712
static void
1957
vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
1713
vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
1958
    struct virtio_net_hdr *hdr)
1714
    struct virtio_net_hdr *hdr)
1959
{
1715
{
1960
	struct vtnet_softc *sc;
1716
	struct vtnet_softc *sc;
1961
	struct ifnet *ifp;
1717
	struct ifnet *ifp;
1718
	struct ether_header *eh;
1962
1719
1963
	sc = rxq->vtnrx_sc;
1720
	sc = rxq->vtnrx_sc;
1964
	ifp = sc->vtnet_ifp;
1721
	ifp = sc->vtnet_ifp;
1965
1722
1966
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1723
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1967
		struct ether_header *eh = mtod(m, struct ether_header *);
1724
		eh = mtod(m, struct ether_header *);
1968
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1725
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1969
			vtnet_vlan_tag_remove(m);
1726
			vtnet_vlan_tag_remove(m);
1970
			/*
1727
			/*
Lines 1979-2014 Link Here
1979
	m->m_pkthdr.flowid = rxq->vtnrx_id;
1736
	m->m_pkthdr.flowid = rxq->vtnrx_id;
1980
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1737
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1981
1738
1982
	if (hdr->flags &
1739
	/*
1983
	    (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) {
1740
	 * BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum
1741
	 * distinction that Linux does. Need to reevaluate if performing
1742
	 * offloading for the NEEDS_CSUM case is really appropriate.
1743
	 */
1744
	if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM |
1745
	    VIRTIO_NET_HDR_F_DATA_VALID)) {
1984
		if (vtnet_rxq_csum(rxq, m, hdr) == 0)
1746
		if (vtnet_rxq_csum(rxq, m, hdr) == 0)
1985
			rxq->vtnrx_stats.vrxs_csum++;
1747
			rxq->vtnrx_stats.vrxs_csum++;
1986
		else
1748
		else
1987
			rxq->vtnrx_stats.vrxs_csum_failed++;
1749
			rxq->vtnrx_stats.vrxs_csum_failed++;
1988
	}
1750
	}
1989
1751
1990
	if (hdr->gso_size != 0) {
1991
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1992
		case VIRTIO_NET_HDR_GSO_TCPV4:
1993
		case VIRTIO_NET_HDR_GSO_TCPV6:
1994
			m->m_pkthdr.lro_nsegs =
1995
			    howmany(m->m_pkthdr.len, hdr->gso_size);
1996
			rxq->vtnrx_stats.vrxs_host_lro++;
1997
			break;
1998
		}
1999
	}
2000
2001
	rxq->vtnrx_stats.vrxs_ipackets++;
1752
	rxq->vtnrx_stats.vrxs_ipackets++;
2002
	rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;
1753
	rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;
2003
1754
2004
#if defined(INET) || defined(INET6)
1755
	VTNET_RXQ_UNLOCK(rxq);
2005
	if (vtnet_software_lro(sc) && ifp->if_capenable & IFCAP_LRO) {
2006
		if (vtnet_lro_rx(rxq, m) == 0)
2007
			return;
2008
	}
2009
#endif
2010
2011
	(*ifp->if_input)(ifp, m);
1756
	(*ifp->if_input)(ifp, m);
1757
	VTNET_RXQ_LOCK(rxq);
2012
}
1758
}
2013
1759
2014
static int
1760
static int
Lines 2018-2042 Link Here
2018
	struct vtnet_softc *sc;
1764
	struct vtnet_softc *sc;
2019
	struct ifnet *ifp;
1765
	struct ifnet *ifp;
2020
	struct virtqueue *vq;
1766
	struct virtqueue *vq;
2021
	int deq, count;
1767
	struct mbuf *m;
1768
	struct virtio_net_hdr_mrg_rxbuf *mhdr;
1769
	int len, deq, nbufs, adjsz, count;
2022
1770
2023
	sc = rxq->vtnrx_sc;
1771
	sc = rxq->vtnrx_sc;
2024
	vq = rxq->vtnrx_vq;
1772
	vq = rxq->vtnrx_vq;
2025
	ifp = sc->vtnet_ifp;
1773
	ifp = sc->vtnet_ifp;
1774
	hdr = &lhdr;
2026
	deq = 0;
1775
	deq = 0;
2027
	count = sc->vtnet_rx_process_limit;
1776
	count = sc->vtnet_rx_process_limit;
2028
1777
2029
	VTNET_RXQ_LOCK_ASSERT(rxq);
1778
	VTNET_RXQ_LOCK_ASSERT(rxq);
2030
1779
2031
#ifdef DEV_NETMAP
2032
	if (netmap_rx_irq(ifp, 0, &deq))
2033
		return (0);
2034
#endif
2035
2036
	while (count-- > 0) {
1780
	while (count-- > 0) {
2037
		struct mbuf *m;
2038
		int len, nbufs, adjsz;
2039
2040
		m = virtqueue_dequeue(vq, &len);
1781
		m = virtqueue_dequeue(vq, &len);
2041
		if (m == NULL)
1782
		if (m == NULL)
2042
			break;
1783
			break;
Lines 2048-2069 Link Here
2048
			continue;
1789
			continue;
2049
		}
1790
		}
2050
1791
2051
		if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) {
1792
		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
2052
			struct virtio_net_hdr_mrg_rxbuf *mhdr =
2053
			    mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
2054
			nbufs = vtnet_htog16(sc, mhdr->num_buffers);
2055
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2056
		} else if (vtnet_modern(sc)) {
2057
			nbufs = 1; /* num_buffers is always 1 */
2058
			adjsz = sizeof(struct virtio_net_hdr_v1);
2059
		} else {
2060
			nbufs = 1;
1793
			nbufs = 1;
2061
			adjsz = sizeof(struct vtnet_rx_header);
1794
			adjsz = sizeof(struct vtnet_rx_header);
2062
			/*
1795
			/*
2063
			 * Account for our gap between the header and start of
1796
			 * Account for our pad inserted between the header
2064
			 * data to keep the segments separated.
1797
			 * and the actual start of the frame.
2065
			 */
1798
			 */
2066
			len += VTNET_RX_HEADER_PAD;
1799
			len += VTNET_RX_HEADER_PAD;
1800
		} else {
1801
			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
1802
			nbufs = mhdr->num_buffers;
1803
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2067
		}
1804
		}
2068
1805
2069
		if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
1806
		if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
Lines 2085-2113 Link Here
2085
		}
1822
		}
2086
1823
2087
		/*
1824
		/*
2088
		 * Save an endian swapped version of the header prior to it
1825
		 * Save copy of header before we strip it. For both mergeable
2089
		 * being stripped. The header is always at the start of the
1826
		 * and non-mergeable, the header is at the beginning of the
2090
		 * mbuf data. num_buffers was already saved (and not needed)
1827
		 * mbuf data. We no longer need num_buffers, so always use a
2091
		 * so use the standard header.
1828
		 * regular header.
1829
		 *
1830
		 * BMV: Is this memcpy() expensive? We know the mbuf data is
1831
		 * still valid even after the m_adj().
2092
		 */
1832
		 */
2093
		hdr = mtod(m, struct virtio_net_hdr *);
1833
		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
2094
		lhdr.flags = hdr->flags;
2095
		lhdr.gso_type = hdr->gso_type;
2096
		lhdr.hdr_len = vtnet_htog16(sc, hdr->hdr_len);
2097
		lhdr.gso_size = vtnet_htog16(sc, hdr->gso_size);
2098
		lhdr.csum_start = vtnet_htog16(sc, hdr->csum_start);
2099
		lhdr.csum_offset = vtnet_htog16(sc, hdr->csum_offset);
2100
		m_adj(m, adjsz);
1834
		m_adj(m, adjsz);
2101
1835
2102
		vtnet_rxq_input(rxq, m, &lhdr);
1836
		vtnet_rxq_input(rxq, m, hdr);
1837
1838
		/* Must recheck after dropping the Rx lock. */
1839
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1840
			break;
2103
	}
1841
	}
2104
1842
2105
	if (deq > 0) {
1843
	if (deq > 0)
2106
#if defined(INET) || defined(INET6)
2107
		tcp_lro_flush_all(&rxq->vtnrx_lro);
2108
#endif
2109
		virtqueue_notify(vq);
1844
		virtqueue_notify(vq);
2110
	}
2111
1845
2112
	return (count > 0 ? 0 : EAGAIN);
1846
	return (count > 0 ? 0 : EAGAIN);
2113
}
1847
}
Lines 2136-2141 Link Here
2136
		return;
1870
		return;
2137
	}
1871
	}
2138
1872
1873
#ifdef DEV_NETMAP
1874
	if (netmap_rx_irq(ifp, rxq->vtnrx_id, &more) != NM_IRQ_PASS)
1875
		return;
1876
#endif /* DEV_NETMAP */
1877
2139
	VTNET_RXQ_LOCK(rxq);
1878
	VTNET_RXQ_LOCK(rxq);
2140
1879
2141
again:
1880
again:
Lines 2155-2162 Link Here
2155
		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
1894
		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
2156
			goto again;
1895
			goto again;
2157
1896
2158
		rxq->vtnrx_stats.vrxs_rescheduled++;
2159
		VTNET_RXQ_UNLOCK(rxq);
1897
		VTNET_RXQ_UNLOCK(rxq);
1898
		rxq->vtnrx_stats.vrxs_rescheduled++;
2160
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
1899
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
2161
	} else
1900
	} else
2162
		VTNET_RXQ_UNLOCK(rxq);
1901
		VTNET_RXQ_UNLOCK(rxq);
Lines 2186-2234 Link Here
2186
		if (!more)
1925
		if (!more)
2187
			vtnet_rxq_disable_intr(rxq);
1926
			vtnet_rxq_disable_intr(rxq);
2188
		rxq->vtnrx_stats.vrxs_rescheduled++;
1927
		rxq->vtnrx_stats.vrxs_rescheduled++;
2189
		VTNET_RXQ_UNLOCK(rxq);
2190
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
1928
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
2191
	} else
1929
	}
2192
		VTNET_RXQ_UNLOCK(rxq);
2193
}
2194
1930
2195
static int
1931
	VTNET_RXQ_UNLOCK(rxq);
2196
vtnet_txq_intr_threshold(struct vtnet_txq *txq)
2197
{
2198
	struct vtnet_softc *sc;
2199
	int threshold;
2200
2201
	sc = txq->vtntx_sc;
2202
2203
	/*
2204
	 * The Tx interrupt is disabled until the queue free count falls
2205
	 * below our threshold. Completed frames are drained from the Tx
2206
	 * virtqueue before transmitting new frames and in the watchdog
2207
	 * callout, so the frequency of Tx interrupts is greatly reduced,
2208
	 * at the cost of not freeing mbufs as quickly as they otherwise
2209
	 * would be.
2210
	 */
2211
	threshold = virtqueue_size(txq->vtntx_vq) / 4;
2212
2213
	/*
2214
	 * Without indirect descriptors, leave enough room for the most
2215
	 * segments we handle.
2216
	 */
2217
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
2218
	    threshold < sc->vtnet_tx_nsegs)
2219
		threshold = sc->vtnet_tx_nsegs;
2220
2221
	return (threshold);
2222
}
1932
}
2223
1933
2224
static int
1934
static int
2225
vtnet_txq_below_threshold(struct vtnet_txq *txq)
1935
vtnet_txq_below_threshold(struct vtnet_txq *txq)
2226
{
1936
{
1937
	struct vtnet_softc *sc;
2227
	struct virtqueue *vq;
1938
	struct virtqueue *vq;
2228
1939
1940
	sc = txq->vtntx_sc;
2229
	vq = txq->vtntx_vq;
1941
	vq = txq->vtntx_vq;
2230
1942
2231
	return (virtqueue_nfree(vq) <= txq->vtntx_intr_threshold);
1943
	return (virtqueue_nfree(vq) <= sc->vtnet_tx_intr_thresh);
2232
}
1944
}
2233
1945
2234
static int
1946
static int
Lines 2263-2275 Link Here
2263
	struct virtqueue *vq;
1975
	struct virtqueue *vq;
2264
	struct vtnet_tx_header *txhdr;
1976
	struct vtnet_tx_header *txhdr;
2265
	int last;
1977
	int last;
1978
#ifdef DEV_NETMAP
1979
	int netmap_bufs = vtnet_netmap_queue_on(txq->vtntx_sc, NR_TX,
1980
						txq->vtntx_id);
1981
#else  /* !DEV_NETMAP */
1982
	int netmap_bufs = 0;
1983
#endif /* !DEV_NETMAP */
2266
1984
2267
	vq = txq->vtntx_vq;
1985
	vq = txq->vtntx_vq;
2268
	last = 0;
1986
	last = 0;
2269
1987
2270
	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
1988
	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
2271
		m_freem(txhdr->vth_mbuf);
1989
		if (!netmap_bufs) {
2272
		uma_zfree(vtnet_tx_header_zone, txhdr);
1990
			m_freem(txhdr->vth_mbuf);
1991
			uma_zfree(vtnet_tx_header_zone, txhdr);
1992
		}
2273
	}
1993
	}
2274
1994
2275
	KASSERT(virtqueue_empty(vq),
1995
	KASSERT(virtqueue_empty(vq),
Lines 2277-2287 Link Here
2277
}
1997
}
2278
1998
2279
/*
1999
/*
2280
 * BMV: This can go away once we finally have offsets in the mbuf header.
2000
 * BMV: Much of this can go away once we finally have offsets in
2001
 * the mbuf packet header. Bug andre@.
2281
 */
2002
 */
2282
static int
2003
static int
2283
vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m, int *etype,
2004
vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m,
2284
    int *proto, int *start)
2005
    int *etype, int *proto, int *start)
2285
{
2006
{
2286
	struct vtnet_softc *sc;
2007
	struct vtnet_softc *sc;
2287
	struct ether_vlan_header *evh;
2008
	struct ether_vlan_header *evh;
Lines 2325-2331 Link Here
2325
		break;
2046
		break;
2326
#endif
2047
#endif
2327
	default:
2048
	default:
2328
		sc->vtnet_stats.tx_csum_unknown_ethtype++;
2049
		sc->vtnet_stats.tx_csum_bad_ethtype++;
2329
		return (EINVAL);
2050
		return (EINVAL);
2330
	}
2051
	}
2331
2052
Lines 2333-2339 Link Here
2333
}
2054
}
2334
2055
2335
static int
2056
static int
2336
vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int flags,
2057
vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type,
2337
    int offset, struct virtio_net_hdr *hdr)
2058
    int offset, struct virtio_net_hdr *hdr)
2338
{
2059
{
2339
	static struct timeval lastecn;
2060
	static struct timeval lastecn;
Lines 2349-2365 Link Here
2349
	} else
2070
	} else
2350
		tcp = (struct tcphdr *)(m->m_data + offset);
2071
		tcp = (struct tcphdr *)(m->m_data + offset);
2351
2072
2352
	hdr->hdr_len = vtnet_gtoh16(sc, offset + (tcp->th_off << 2));
2073
	hdr->hdr_len = offset + (tcp->th_off << 2);
2353
	hdr->gso_size = vtnet_gtoh16(sc, m->m_pkthdr.tso_segsz);
2074
	hdr->gso_size = m->m_pkthdr.tso_segsz;
2354
	hdr->gso_type = (flags & CSUM_IP_TSO) ?
2075
	hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
2355
	    VIRTIO_NET_HDR_GSO_TCPV4 : VIRTIO_NET_HDR_GSO_TCPV6;
2076
	    VIRTIO_NET_HDR_GSO_TCPV6;
2356
2077
2357
	if (__predict_false(tcp->th_flags & TH_CWR)) {
2078
	if (tcp->th_flags & TH_CWR) {
2358
		/*
2079
		/*
2359
		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In
2080
		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
2360
		 * FreeBSD, ECN support is not on a per-interface basis,
2081
		 * ECN support is not on a per-interface basis, but globally via
2361
		 * but globally via the net.inet.tcp.ecn.enable sysctl
2082
		 * the net.inet.tcp.ecn.enable sysctl knob. The default is off.
2362
		 * knob. The default is off.
2363
		 */
2083
		 */
2364
		if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
2084
		if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
2365
			if (ppsratecheck(&lastecn, &curecn, 1))
2085
			if (ppsratecheck(&lastecn, &curecn, 1))
Lines 2389-2424 Link Here
2389
	if (error)
2109
	if (error)
2390
		goto drop;
2110
		goto drop;
2391
2111
2392
	if (flags & (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6)) {
2112
	if ((etype == ETHERTYPE_IP && flags & VTNET_CSUM_OFFLOAD) ||
2393
		/* Sanity check the parsed mbuf matches the offload flags. */
2113
	    (etype == ETHERTYPE_IPV6 && flags & VTNET_CSUM_OFFLOAD_IPV6)) {
2394
		if (__predict_false((flags & VTNET_CSUM_OFFLOAD &&
2114
		/*
2395
		    etype != ETHERTYPE_IP) || (flags & VTNET_CSUM_OFFLOAD_IPV6
2115
		 * We could compare the IP protocol vs the CSUM_ flag too,
2396
		    && etype != ETHERTYPE_IPV6))) {
2116
		 * but that really should not be necessary.
2397
			sc->vtnet_stats.tx_csum_proto_mismatch++;
2117
		 */
2398
			goto drop;
2399
		}
2400
2401
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
2118
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
2402
		hdr->csum_start = vtnet_gtoh16(sc, csum_start);
2119
		hdr->csum_start = csum_start;
2403
		hdr->csum_offset = vtnet_gtoh16(sc, m->m_pkthdr.csum_data);
2120
		hdr->csum_offset = m->m_pkthdr.csum_data;
2404
		txq->vtntx_stats.vtxs_csum++;
2121
		txq->vtntx_stats.vtxs_csum++;
2405
	}
2122
	}
2406
2123
2407
	if (flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) {
2124
	if (flags & CSUM_TSO) {
2408
		/*
2409
		 * Sanity check the parsed mbuf IP protocol is TCP, and
2410
		 * VirtIO TSO reqires the checksum offloading above.
2411
		 */
2412
		if (__predict_false(proto != IPPROTO_TCP)) {
2125
		if (__predict_false(proto != IPPROTO_TCP)) {
2126
			/* Likely failed to correctly parse the mbuf. */
2413
			sc->vtnet_stats.tx_tso_not_tcp++;
2127
			sc->vtnet_stats.tx_tso_not_tcp++;
2414
			goto drop;
2128
			goto drop;
2415
		} else if (__predict_false((hdr->flags &
2416
		    VIRTIO_NET_HDR_F_NEEDS_CSUM) == 0)) {
2417
			sc->vtnet_stats.tx_tso_without_csum++;
2418
			goto drop;
2419
		}
2129
		}
2420
2130
2421
		error = vtnet_txq_offload_tso(txq, m, flags, csum_start, hdr);
2131
		KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
2132
		    ("%s: mbuf %p TSO without checksum offload %#x",
2133
		    __func__, m, flags));
2134
2135
		error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr);
2422
		if (error)
2136
		if (error)
2423
			goto drop;
2137
			goto drop;
2424
	}
2138
	}
Lines 2447-2457 Link Here
2447
2161
2448
	sglist_reset(sg);
2162
	sglist_reset(sg);
2449
	error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
2163
	error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
2450
	if (error != 0 || sg->sg_nseg != 1) {
2164
	KASSERT(error == 0 && sg->sg_nseg == 1,
2451
		KASSERT(0, ("%s: cannot add header to sglist error %d nseg %d",
2165
	    ("%s: error %d adding header to sglist", __func__, error));
2452
		    __func__, error, sg->sg_nseg));
2453
		goto fail;
2454
	}
2455
2166
2456
	error = sglist_append_mbuf(sg, m);
2167
	error = sglist_append_mbuf(sg, m);
2457
	if (error) {
2168
	if (error) {
Lines 2499-2507 Link Here
2499
	}
2210
	}
2500
2211
2501
	/*
2212
	/*
2502
	 * Always use the non-mergeable header, regardless if mergable headers
2213
	 * Always use the non-mergeable header, regardless if the feature
2503
	 * were negotiated, because for transmit num_buffers is always zero.
2214
	 * was negotiated. For transmit, num_buffers is always zero. The
2504
	 * The vtnet_hdr_size is used to enqueue the right header size segment.
2215
	 * vtnet_hdr_size is used to enqueue the correct header size.
2505
	 */
2216
	 */
2506
	hdr = &txhdr->vth_uhdr.hdr;
2217
	hdr = &txhdr->vth_uhdr.hdr;
2507
2218
Lines 2523-2531 Link Here
2523
	}
2234
	}
2524
2235
2525
	error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
2236
	error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
2237
	if (error == 0)
2238
		return (0);
2239
2526
fail:
2240
fail:
2527
	if (error)
2241
	uma_zfree(vtnet_tx_header_zone, txhdr);
2528
		uma_zfree(vtnet_tx_header_zone, txhdr);
2529
2242
2530
	return (error);
2243
	return (error);
2531
}
2244
}
Lines 2674-2679 Link Here
2674
	sc = ifp->if_softc;
2387
	sc = ifp->if_softc;
2675
	npairs = sc->vtnet_act_vq_pairs;
2388
	npairs = sc->vtnet_act_vq_pairs;
2676
2389
2390
	/* check if flowid is set */
2677
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
2391
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
2678
		i = m->m_pkthdr.flowid % npairs;
2392
		i = m->m_pkthdr.flowid % npairs;
2679
	else
2393
	else
Lines 2763-2775 Link Here
2763
	deq = 0;
2477
	deq = 0;
2764
	VTNET_TXQ_LOCK_ASSERT(txq);
2478
	VTNET_TXQ_LOCK_ASSERT(txq);
2765
2479
2766
#ifdef DEV_NETMAP
2767
	if (netmap_tx_irq(txq->vtntx_sc->vtnet_ifp, txq->vtntx_id)) {
2768
		virtqueue_disable_intr(vq); // XXX luigi
2769
		return (0); // XXX or 1 ?
2770
	}
2771
#endif
2772
2773
	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
2480
	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
2774
		m = txhdr->vth_mbuf;
2481
		m = txhdr->vth_mbuf;
2775
		deq++;
2482
		deq++;
Lines 2811-2816 Link Here
2811
		return;
2518
		return;
2812
	}
2519
	}
2813
2520
2521
#ifdef DEV_NETMAP
2522
	if (netmap_tx_irq(ifp, txq->vtntx_id) != NM_IRQ_PASS)
2523
		return;
2524
#endif /* DEV_NETMAP */
2525
2814
	VTNET_TXQ_LOCK(txq);
2526
	VTNET_TXQ_LOCK(txq);
2815
2527
2816
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2528
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
Lines 2997-3003 Link Here
2997
	 * Most drivers just ignore the return value - it only fails
2709
	 * Most drivers just ignore the return value - it only fails
2998
	 * with ENOMEM so an error is not likely.
2710
	 * with ENOMEM so an error is not likely.
2999
	 */
2711
	 */
3000
	for (i = 0; i < sc->vtnet_req_vq_pairs; i++) {
2712
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
3001
		rxq = &sc->vtnet_rxqs[i];
2713
		rxq = &sc->vtnet_rxqs[i];
3002
		error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
2714
		error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
3003
		    "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
2715
		    "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
Lines 3027-3033 Link Here
3027
		rxq = &sc->vtnet_rxqs[i];
2739
		rxq = &sc->vtnet_rxqs[i];
3028
		if (rxq->vtnrx_tq != NULL) {
2740
		if (rxq->vtnrx_tq != NULL) {
3029
			taskqueue_free(rxq->vtnrx_tq);
2741
			taskqueue_free(rxq->vtnrx_tq);
3030
			rxq->vtnrx_vq = NULL;
2742
			rxq->vtnrx_tq = NULL;
3031
		}
2743
		}
3032
2744
3033
		txq = &sc->vtnet_txqs[i];
2745
		txq = &sc->vtnet_txqs[i];
Lines 3067-3078 Link Here
3067
	struct vtnet_txq *txq;
2779
	struct vtnet_txq *txq;
3068
	int i;
2780
	int i;
3069
2781
3070
#ifdef DEV_NETMAP
2782
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
3071
	if (nm_native_on(NA(sc->vtnet_ifp)))
3072
		return;
3073
#endif
3074
3075
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
3076
		rxq = &sc->vtnet_rxqs[i];
2783
		rxq = &sc->vtnet_rxqs[i];
3077
		vtnet_rxq_free_mbufs(rxq);
2784
		vtnet_rxq_free_mbufs(rxq);
3078
2785
Lines 3088-3100 Link Here
3088
	struct vtnet_txq *txq;
2795
	struct vtnet_txq *txq;
3089
	int i;
2796
	int i;
3090
2797
3091
	VTNET_CORE_LOCK_ASSERT(sc);
3092
3093
	/*
2798
	/*
3094
	 * Lock and unlock the per-queue mutex so we known the stop
2799
	 * Lock and unlock the per-queue mutex so we known the stop
3095
	 * state is visible. Doing only the active queues should be
2800
	 * state is visible. Doing only the active queues should be
3096
	 * sufficient, but it does not cost much extra to do all the
2801
	 * sufficient, but it does not cost much extra to do all the
3097
	 * queues.
2802
	 * queues. Note we hold the core mutex here too.
3098
	 */
2803
	 */
3099
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2804
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
3100
		rxq = &sc->vtnet_rxqs[i];
2805
		rxq = &sc->vtnet_rxqs[i];
Lines 3133-3140 Link Here
3133
	virtio_stop(dev);
2838
	virtio_stop(dev);
3134
	vtnet_stop_rendezvous(sc);
2839
	vtnet_stop_rendezvous(sc);
3135
2840
2841
	/* Free any mbufs left in the virtqueues. */
3136
	vtnet_drain_rxtx_queues(sc);
2842
	vtnet_drain_rxtx_queues(sc);
3137
	sc->vtnet_act_vq_pairs = 1;
3138
}
2843
}
3139
2844
3140
static int
2845
static int
Lines 3143-3179 Link Here
3143
	device_t dev;
2848
	device_t dev;
3144
	struct ifnet *ifp;
2849
	struct ifnet *ifp;
3145
	uint64_t features;
2850
	uint64_t features;
3146
	int error;
2851
	int mask, error;
3147
2852
3148
	dev = sc->vtnet_dev;
2853
	dev = sc->vtnet_dev;
3149
	ifp = sc->vtnet_ifp;
2854
	ifp = sc->vtnet_ifp;
3150
	features = sc->vtnet_negotiated_features;
2855
	features = sc->vtnet_features;
3151
2856
2857
	mask = 0;
2858
#if defined(INET)
2859
	mask |= IFCAP_RXCSUM;
2860
#endif
2861
#if defined (INET6)
2862
	mask |= IFCAP_RXCSUM_IPV6;
2863
#endif
2864
3152
	/*
2865
	/*
3153
	 * Re-negotiate with the host, removing any disabled receive
2866
	 * Re-negotiate with the host, removing any disabled receive
3154
	 * features. Transmit features are disabled only on our side
2867
	 * features. Transmit features are disabled only on our side
3155
	 * via if_capenable and if_hwassist.
2868
	 * via if_capenable and if_hwassist.
3156
	 */
2869
	 */
3157
2870
3158
	if ((ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) == 0)
2871
	if (ifp->if_capabilities & mask) {
3159
		features &= ~(VIRTIO_NET_F_GUEST_CSUM | VTNET_LRO_FEATURES);
2872
		/*
2873
		 * We require both IPv4 and IPv6 offloading to be enabled
2874
		 * in order to negotiated it: VirtIO does not distinguish
2875
		 * between the two.
2876
		 */
2877
		if ((ifp->if_capenable & mask) != mask)
2878
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
2879
	}
3160
2880
3161
	if ((ifp->if_capenable & IFCAP_LRO) == 0)
2881
	if (ifp->if_capabilities & IFCAP_LRO) {
3162
		features &= ~VTNET_LRO_FEATURES;
2882
		if ((ifp->if_capenable & IFCAP_LRO) == 0)
2883
			features &= ~VTNET_LRO_FEATURES;
2884
	}
3163
2885
3164
	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
2886
	if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
3165
		features &= ~VIRTIO_NET_F_CTRL_VLAN;
2887
		if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
2888
			features &= ~VIRTIO_NET_F_CTRL_VLAN;
2889
	}
3166
2890
3167
	error = virtio_reinit(dev, features);
2891
	error = virtio_reinit(dev, features);
3168
	if (error) {
2892
	if (error)
3169
		device_printf(dev, "virtio reinit error %d\n", error);
2893
		device_printf(dev, "virtio reinit error %d\n", error);
3170
		return (error);
3171
	}
3172
2894
3173
	sc->vtnet_features = features;
2895
	return (error);
3174
	virtio_reinit_complete(dev);
3175
3176
	return (0);
3177
}
2896
}
3178
2897
3179
static void
2898
static void
Lines 3184-3190 Link Here
3184
	ifp = sc->vtnet_ifp;
2903
	ifp = sc->vtnet_ifp;
3185
2904
3186
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
2905
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
2906
		/* Restore promiscuous and all-multicast modes. */
3187
		vtnet_rx_filter(sc);
2907
		vtnet_rx_filter(sc);
2908
		/* Restore filtered MAC addresses. */
3188
		vtnet_rx_filter_mac(sc);
2909
		vtnet_rx_filter_mac(sc);
3189
	}
2910
	}
3190
2911
Lines 3196-3225 Link Here
3196
vtnet_init_rx_queues(struct vtnet_softc *sc)
2917
vtnet_init_rx_queues(struct vtnet_softc *sc)
3197
{
2918
{
3198
	device_t dev;
2919
	device_t dev;
3199
	struct ifnet *ifp;
3200
	struct vtnet_rxq *rxq;
2920
	struct vtnet_rxq *rxq;
3201
	int i, clustersz, error;
2921
	int i, clsize, error;
3202
2922
3203
	dev = sc->vtnet_dev;
2923
	dev = sc->vtnet_dev;
3204
	ifp = sc->vtnet_ifp;
3205
2924
3206
	clustersz = vtnet_rx_cluster_size(sc, ifp->if_mtu);
2925
	/*
3207
	sc->vtnet_rx_clustersz = clustersz;
2926
	 * Use the new cluster size if one has been set (via a MTU
3208
2927
	 * change). Otherwise, use the standard 2K clusters.
3209
	if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) {
2928
	 *
3210
		sc->vtnet_rx_nmbufs = howmany(sizeof(struct vtnet_rx_header) +
2929
	 * BMV: It might make sense to use page sized clusters as
3211
		    VTNET_MAX_RX_SIZE, clustersz);
2930
	 * the default (depending on the features negotiated).
3212
		KASSERT(sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
2931
	 */
3213
		    ("%s: too many rx mbufs %d for %d segments", __func__,
2932
	if (sc->vtnet_rx_new_clsize != 0) {
3214
		    sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
2933
		clsize = sc->vtnet_rx_new_clsize;
2934
		sc->vtnet_rx_new_clsize = 0;
3215
	} else
2935
	} else
3216
		sc->vtnet_rx_nmbufs = 1;
2936
		clsize = MCLBYTES;
3217
2937
3218
#ifdef DEV_NETMAP
2938
	sc->vtnet_rx_clsize = clsize;
3219
	if (vtnet_netmap_init_rx_buffers(sc))
2939
	sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize);
3220
		return (0);
3221
#endif
3222
2940
2941
	KASSERT(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS ||
2942
	    sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
2943
	    ("%s: too many rx mbufs %d for %d segments", __func__,
2944
	    sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
2945
3223
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2946
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
3224
		rxq = &sc->vtnet_rxqs[i];
2947
		rxq = &sc->vtnet_rxqs[i];
3225
2948
Lines 3229-3235 Link Here
3229
		VTNET_RXQ_UNLOCK(rxq);
2952
		VTNET_RXQ_UNLOCK(rxq);
3230
2953
3231
		if (error) {
2954
		if (error) {
3232
			device_printf(dev, "cannot populate Rx queue %d\n", i);
2955
			device_printf(dev,
2956
			    "cannot allocate mbufs for Rx queue %d\n", i);
3233
			return (error);
2957
			return (error);
3234
		}
2958
		}
3235
	}
2959
	}
Lines 3246-3252 Link Here
3246
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2970
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
3247
		txq = &sc->vtnet_txqs[i];
2971
		txq = &sc->vtnet_txqs[i];
3248
		txq->vtntx_watchdog = 0;
2972
		txq->vtntx_watchdog = 0;
3249
		txq->vtntx_intr_threshold = vtnet_txq_intr_threshold(txq);
3250
	}
2973
	}
3251
2974
3252
	return (0);
2975
	return (0);
Lines 3276-3360 Link Here
3276
2999
3277
	dev = sc->vtnet_dev;
3000
	dev = sc->vtnet_dev;
3278
3001
3279
	if ((sc->vtnet_flags & VTNET_FLAG_MQ) == 0) {
3002
	if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) {
3280
		sc->vtnet_act_vq_pairs = 1;
3003
		sc->vtnet_act_vq_pairs = 1;
3281
		return;
3004
		return;
3282
	}
3005
	}
3283
3006
3284
	npairs = sc->vtnet_req_vq_pairs;
3007
	npairs = sc->vtnet_requested_vq_pairs;
3285
3008
3286
	if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
3009
	if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
3287
		device_printf(dev, "cannot set active queue pairs to %d, "
3010
		device_printf(dev,
3288
		    "falling back to 1 queue pair\n", npairs);
3011
		    "cannot set active queue pairs to %d\n", npairs);
3289
		npairs = 1;
3012
		npairs = 1;
3290
	}
3013
	}
3291
3014
3292
	sc->vtnet_act_vq_pairs = npairs;
3015
	sc->vtnet_act_vq_pairs = npairs;
3293
}
3016
}
3294
3017
3295
static void
3296
vtnet_update_rx_offloads(struct vtnet_softc *sc)
3297
{
3298
	struct ifnet *ifp;
3299
	uint64_t features;
3300
	int error;
3301
3302
	ifp = sc->vtnet_ifp;
3303
	features = sc->vtnet_features;
3304
3305
	VTNET_CORE_LOCK_ASSERT(sc);
3306
3307
	if (ifp->if_capabilities & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
3308
		if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
3309
			features |= VIRTIO_NET_F_GUEST_CSUM;
3310
		else
3311
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
3312
	}
3313
3314
	if (ifp->if_capabilities & IFCAP_LRO && !vtnet_software_lro(sc)) {
3315
		if (ifp->if_capenable & IFCAP_LRO)
3316
			features |= VTNET_LRO_FEATURES;
3317
		else
3318
			features &= ~VTNET_LRO_FEATURES;
3319
	}
3320
3321
	error = vtnet_ctrl_guest_offloads(sc,
3322
	    features & (VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4 |
3323
		        VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN  |
3324
			VIRTIO_NET_F_GUEST_UFO));
3325
	if (error) {
3326
		device_printf(sc->vtnet_dev,
3327
		    "%s: cannot update Rx features\n", __func__);
3328
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3329
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3330
			vtnet_init_locked(sc);
3331
		}
3332
	} else
3333
		sc->vtnet_features = features;
3334
}
3335
3336
static int
3018
static int
3337
vtnet_reinit(struct vtnet_softc *sc)
3019
vtnet_reinit(struct vtnet_softc *sc)
3338
{
3020
{
3339
	device_t dev;
3340
	struct ifnet *ifp;
3021
	struct ifnet *ifp;
3341
	int error;
3022
	int error;
3342
3023
3343
	dev = sc->vtnet_dev;
3344
	ifp = sc->vtnet_ifp;
3024
	ifp = sc->vtnet_ifp;
3345
3025
3026
	/* Use the current MAC address. */
3346
	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
3027
	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
3028
	vtnet_set_hwaddr(sc);
3347
3029
3348
	error = vtnet_virtio_reinit(sc);
3349
	if (error)
3350
		return (error);
3351
3352
	vtnet_set_macaddr(sc);
3353
	vtnet_set_active_vq_pairs(sc);
3030
	vtnet_set_active_vq_pairs(sc);
3354
3031
3355
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
3356
		vtnet_init_rx_filters(sc);
3357
3358
	ifp->if_hwassist = 0;
3032
	ifp->if_hwassist = 0;
3359
	if (ifp->if_capenable & IFCAP_TXCSUM)
3033
	if (ifp->if_capenable & IFCAP_TXCSUM)
3360
		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
3034
		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
Lines 3365-3374 Link Here
3365
	if (ifp->if_capenable & IFCAP_TSO6)
3039
	if (ifp->if_capenable & IFCAP_TSO6)
3366
		ifp->if_hwassist |= CSUM_IP6_TSO;
3040
		ifp->if_hwassist |= CSUM_IP6_TSO;
3367
3041
3042
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
3043
		vtnet_init_rx_filters(sc);
3044
3368
	error = vtnet_init_rxtx_queues(sc);
3045
	error = vtnet_init_rxtx_queues(sc);
3369
	if (error)
3046
	if (error)
3370
		return (error);
3047
		return (error);
3371
3048
3049
	vtnet_enable_interrupts(sc);
3050
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3051
3372
	return (0);
3052
	return (0);
3373
}
3053
}
3374
3054
Lines 3388-3402 Link Here
3388
3068
3389
	vtnet_stop(sc);
3069
	vtnet_stop(sc);
3390
3070
3391
	if (vtnet_reinit(sc) != 0) {
3071
	/* Reinitialize with the host. */
3392
		vtnet_stop(sc);
3072
	if (vtnet_virtio_reinit(sc) != 0)
3393
		return;
3073
		goto fail;
3394
	}
3395
3074
3396
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3075
	if (vtnet_reinit(sc) != 0)
3076
		goto fail;
3077
3078
	virtio_reinit_complete(dev);
3079
3397
	vtnet_update_link_status(sc);
3080
	vtnet_update_link_status(sc);
3398
	vtnet_enable_interrupts(sc);
3399
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
3081
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
3082
3083
	return;
3084
3085
fail:
3086
	vtnet_stop(sc);
3400
}
3087
}
3401
3088
3402
static void
3089
static void
Lines 3406-3418 Link Here
3406
3093
3407
	sc = xsc;
3094
	sc = xsc;
3408
3095
3409
#ifdef DEV_NETMAP
3410
	if (!NA(sc->vtnet_ifp)) {
3411
		D("try to attach again");
3412
		vtnet_netmap_attach(sc);
3413
	}
3414
#endif
3415
3416
	VTNET_CORE_LOCK(sc);
3096
	VTNET_CORE_LOCK(sc);
3417
	vtnet_init_locked(sc);
3097
	vtnet_init_locked(sc);
3418
	VTNET_CORE_UNLOCK(sc);
3098
	VTNET_CORE_UNLOCK(sc);
Lines 3421-3433 Link Here
3421
static void
3101
static void
3422
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
3102
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
3423
{
3103
{
3104
	struct virtqueue *vq;
3424
3105
3106
	vq = sc->vtnet_ctrl_vq;
3107
3425
	/*
3108
	/*
3426
	 * The control virtqueue is only polled and therefore it should
3109
	 * The control virtqueue is only polled and therefore it should
3427
	 * already be empty.
3110
	 * already be empty.
3428
	 */
3111
	 */
3429
	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
3112
	KASSERT(virtqueue_empty(vq),
3430
	    ("%s: ctrl vq %p not empty", __func__, sc->vtnet_ctrl_vq));
3113
	    ("%s: ctrl vq %p not empty", __func__, vq));
3431
}
3114
}
3432
3115
3433
static void
3116
static void
Lines 3438-3525 Link Here
3438
3121
3439
	vq = sc->vtnet_ctrl_vq;
3122
	vq = sc->vtnet_ctrl_vq;
3440
3123
3441
	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ);
3442
	VTNET_CORE_LOCK_ASSERT(sc);
3124
	VTNET_CORE_LOCK_ASSERT(sc);
3125
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
3126
	    ("%s: CTRL_VQ feature not negotiated", __func__));
3443
3127
3444
	if (!virtqueue_empty(vq))
3128
	if (!virtqueue_empty(vq))
3445
		return;
3129
		return;
3130
	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
3131
		return;
3446
3132
3447
	/*
3133
	/*
3448
	 * Poll for the response, but the command is likely completed before
3134
	 * Poll for the response, but the command is likely already
3449
	 * returning from the notify.
3135
	 * done when we return from the notify.
3450
	 */
3136
	 */
3451
	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) == 0)  {
3137
	virtqueue_notify(vq);
3452
		virtqueue_notify(vq);
3138
	virtqueue_poll(vq, NULL);
3453
		virtqueue_poll(vq, NULL);
3454
	}
3455
}
3139
}
3456
3140
3457
static int
3141
static int
3458
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
3142
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
3459
{
3143
{
3144
	struct virtio_net_ctrl_hdr hdr __aligned(2);
3460
	struct sglist_seg segs[3];
3145
	struct sglist_seg segs[3];
3461
	struct sglist sg;
3146
	struct sglist sg;
3462
	struct {
3147
	uint8_t ack;
3463
		struct virtio_net_ctrl_hdr hdr __aligned(2);
3464
		uint8_t pad1;
3465
		uint8_t addr[ETHER_ADDR_LEN] __aligned(8);
3466
		uint8_t pad2;
3467
		uint8_t ack;
3468
	} s;
3469
	int error;
3148
	int error;
3470
3149
3471
	error = 0;
3150
	hdr.class = VIRTIO_NET_CTRL_MAC;
3472
	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_MAC);
3151
	hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
3152
	ack = VIRTIO_NET_ERR;
3473
3153
3474
	s.hdr.class = VIRTIO_NET_CTRL_MAC;
3154
	sglist_init(&sg, 3, segs);
3475
	s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
3476
	bcopy(hwaddr, &s.addr[0], ETHER_ADDR_LEN);
3477
	s.ack = VIRTIO_NET_ERR;
3478
3479
	sglist_init(&sg, nitems(segs), segs);
3480
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3481
	error |= sglist_append(&sg, &s.addr[0], ETHER_ADDR_LEN);
3482
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3483
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3484
3485
	if (error == 0)
3486
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3487
3488
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3489
}
3490
3491
static int
3492
vtnet_ctrl_guest_offloads(struct vtnet_softc *sc, uint64_t offloads)
3493
{
3494
	struct sglist_seg segs[3];
3495
	struct sglist sg;
3496
	struct {
3497
		struct virtio_net_ctrl_hdr hdr __aligned(2);
3498
		uint8_t pad1;
3499
		uint64_t offloads __aligned(8);
3500
		uint8_t pad2;
3501
		uint8_t ack;
3502
	} s;
3503
	int error;
3504
3505
	error = 0;
3155
	error = 0;
3506
	MPASS(sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
3156
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3157
	error |= sglist_append(&sg, hwaddr, ETHER_ADDR_LEN);
3158
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3159
	KASSERT(error == 0 && sg.sg_nseg == 3,
3160
	    ("%s: error %d adding set MAC msg to sglist", __func__, error));
3507
3161
3508
	s.hdr.class = VIRTIO_NET_CTRL_GUEST_OFFLOADS;
3162
	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3509
	s.hdr.cmd = VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET;
3510
	s.offloads = vtnet_gtoh64(sc, offloads);
3511
	s.ack = VIRTIO_NET_ERR;
3512
3163
3513
	sglist_init(&sg, nitems(segs), segs);
3164
	return (ack == VIRTIO_NET_OK ? 0 : EIO);
3514
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3515
	error |= sglist_append(&sg, &s.offloads, sizeof(uint64_t));
3516
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3517
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3518
3519
	if (error == 0)
3520
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3521
3522
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3523
}
3165
}
3524
3166
3525
static int
3167
static int
Lines 3528-3591 Link Here
3528
	struct sglist_seg segs[3];
3170
	struct sglist_seg segs[3];
3529
	struct sglist sg;
3171
	struct sglist sg;
3530
	struct {
3172
	struct {
3531
		struct virtio_net_ctrl_hdr hdr __aligned(2);
3173
		struct virtio_net_ctrl_hdr hdr;
3532
		uint8_t pad1;
3174
		uint8_t pad1;
3533
		struct virtio_net_ctrl_mq mq __aligned(2);
3175
		struct virtio_net_ctrl_mq mq;
3534
		uint8_t pad2;
3176
		uint8_t pad2;
3535
		uint8_t ack;
3177
		uint8_t ack;
3536
	} s;
3178
	} s __aligned(2);
3537
	int error;
3179
	int error;
3538
3180
3539
	error = 0;
3540
	MPASS(sc->vtnet_flags & VTNET_FLAG_MQ);
3541
3542
	s.hdr.class = VIRTIO_NET_CTRL_MQ;
3181
	s.hdr.class = VIRTIO_NET_CTRL_MQ;
3543
	s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
3182
	s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
3544
	s.mq.virtqueue_pairs = vtnet_gtoh16(sc, npairs);
3183
	s.mq.virtqueue_pairs = npairs;
3545
	s.ack = VIRTIO_NET_ERR;
3184
	s.ack = VIRTIO_NET_ERR;
3546
3185
3547
	sglist_init(&sg, nitems(segs), segs);
3186
	sglist_init(&sg, 3, segs);
3187
	error = 0;
3548
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3188
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3549
	error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
3189
	error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
3550
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3190
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3551
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3191
	KASSERT(error == 0 && sg.sg_nseg == 3,
3192
	    ("%s: error %d adding MQ message to sglist", __func__, error));
3552
3193
3553
	if (error == 0)
3194
	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3554
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3555
3195
3556
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3196
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3557
}
3197
}
3558
3198
3559
static int
3199
static int
3560
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, uint8_t cmd, int on)
3200
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
3561
{
3201
{
3562
	struct sglist_seg segs[3];
3202
	struct sglist_seg segs[3];
3563
	struct sglist sg;
3203
	struct sglist sg;
3564
	struct {
3204
	struct {
3565
		struct virtio_net_ctrl_hdr hdr __aligned(2);
3205
		struct virtio_net_ctrl_hdr hdr;
3566
		uint8_t pad1;
3206
		uint8_t pad1;
3567
		uint8_t onoff;
3207
		uint8_t onoff;
3568
		uint8_t pad2;
3208
		uint8_t pad2;
3569
		uint8_t ack;
3209
		uint8_t ack;
3570
	} s;
3210
	} s __aligned(2);
3571
	int error;
3211
	int error;
3572
3212
3573
	error = 0;
3213
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
3574
	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX);
3214
	    ("%s: CTRL_RX feature not negotiated", __func__));
3575
3215
3576
	s.hdr.class = VIRTIO_NET_CTRL_RX;
3216
	s.hdr.class = VIRTIO_NET_CTRL_RX;
3577
	s.hdr.cmd = cmd;
3217
	s.hdr.cmd = cmd;
3578
	s.onoff = !!on;
3218
	s.onoff = !!on;
3579
	s.ack = VIRTIO_NET_ERR;
3219
	s.ack = VIRTIO_NET_ERR;
3580
3220
3581
	sglist_init(&sg, nitems(segs), segs);
3221
	sglist_init(&sg, 3, segs);
3222
	error = 0;
3582
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3223
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3583
	error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
3224
	error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
3584
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3225
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3585
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3226
	KASSERT(error == 0 && sg.sg_nseg == 3,
3227
	    ("%s: error %d adding Rx message to sglist", __func__, error));
3586
3228
3587
	if (error == 0)
3229
	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3588
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3589
3230
3590
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3231
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3591
}
3232
}
Lines 3593-3608 Link Here
3593
static int
3234
static int
3594
vtnet_set_promisc(struct vtnet_softc *sc, int on)
3235
vtnet_set_promisc(struct vtnet_softc *sc, int on)
3595
{
3236
{
3237
3596
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
3238
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
3597
}
3239
}
3598
3240
3599
static int
3241
static int
3600
vtnet_set_allmulti(struct vtnet_softc *sc, int on)
3242
vtnet_set_allmulti(struct vtnet_softc *sc, int on)
3601
{
3243
{
3244
3602
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
3245
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
3603
}
3246
}
3604
3247
3248
/*
3249
 * The device defaults to promiscuous mode for backwards compatibility.
3250
 * Turn it off at attach time if possible.
3251
 */
3605
static void
3252
static void
3253
vtnet_attach_disable_promisc(struct vtnet_softc *sc)
3254
{
3255
	struct ifnet *ifp;
3256
3257
	ifp = sc->vtnet_ifp;
3258
3259
	VTNET_CORE_LOCK(sc);
3260
	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) {
3261
		ifp->if_flags |= IFF_PROMISC;
3262
	} else if (vtnet_set_promisc(sc, 0) != 0) {
3263
		ifp->if_flags |= IFF_PROMISC;
3264
		device_printf(sc->vtnet_dev,
3265
		    "cannot disable default promiscuous mode\n");
3266
	}
3267
	VTNET_CORE_UNLOCK(sc);
3268
}
3269
3270
static void
3606
vtnet_rx_filter(struct vtnet_softc *sc)
3271
vtnet_rx_filter(struct vtnet_softc *sc)
3607
{
3272
{
3608
	device_t dev;
3273
	device_t dev;
Lines 3613-3627 Link Here
3613
3278
3614
	VTNET_CORE_LOCK_ASSERT(sc);
3279
	VTNET_CORE_LOCK_ASSERT(sc);
3615
3280
3616
	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0) {
3281
	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
3617
		device_printf(dev, "cannot %s promiscuous mode\n",
3282
		device_printf(dev, "cannot %s promiscuous mode\n",
3618
		    ifp->if_flags & IFF_PROMISC ? "enable" : "disable");
3283
		    ifp->if_flags & IFF_PROMISC ? "enable" : "disable");
3619
	}
3620
3284
3621
	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0) {
3285
	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
3622
		device_printf(dev, "cannot %s all-multicast mode\n",
3286
		device_printf(dev, "cannot %s all-multicast mode\n",
3623
		    ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
3287
		    ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
3624
	}
3625
}
3288
}
3626
3289
3627
static void
3290
static void
Lines 3639-3653 Link Here
3639
3302
3640
	ifp = sc->vtnet_ifp;
3303
	ifp = sc->vtnet_ifp;
3641
	filter = sc->vtnet_mac_filter;
3304
	filter = sc->vtnet_mac_filter;
3642
3643
	ucnt = 0;
3305
	ucnt = 0;
3644
	mcnt = 0;
3306
	mcnt = 0;
3645
	promisc = 0;
3307
	promisc = 0;
3646
	allmulti = 0;
3308
	allmulti = 0;
3647
	error = 0;
3648
3309
3649
	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX);
3650
	VTNET_CORE_LOCK_ASSERT(sc);
3310
	VTNET_CORE_LOCK_ASSERT(sc);
3311
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
3312
	    ("%s: CTRL_RX feature not negotiated", __func__));
3651
3313
3652
	/* Unicast MAC addresses: */
3314
	/* Unicast MAC addresses: */
3653
	if_addr_rlock(ifp);
3315
	if_addr_rlock(ifp);
Lines 3668-3673 Link Here
3668
	}
3330
	}
3669
	if_addr_runlock(ifp);
3331
	if_addr_runlock(ifp);
3670
3332
3333
	if (promisc != 0) {
3334
		filter->vmf_unicast.nentries = 0;
3335
		if_printf(ifp, "more than %d MAC addresses assigned, "
3336
		    "falling back to promiscuous mode\n",
3337
		    VTNET_MAX_MAC_ENTRIES);
3338
	} else
3339
		filter->vmf_unicast.nentries = ucnt;
3340
3671
	/* Multicast MAC addresses: */
3341
	/* Multicast MAC addresses: */
3672
	if_maddr_rlock(ifp);
3342
	if_maddr_rlock(ifp);
3673
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3343
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
Lines 3684-3723 Link Here
3684
	}
3354
	}
3685
	if_maddr_runlock(ifp);
3355
	if_maddr_runlock(ifp);
3686
3356
3687
	if (promisc != 0) {
3688
		if_printf(ifp, "cannot filter more than %d MAC addresses, "
3689
		    "falling back to promiscuous mode\n",
3690
		    VTNET_MAX_MAC_ENTRIES);
3691
		ucnt = 0;
3692
	}
3693
	if (allmulti != 0) {
3357
	if (allmulti != 0) {
3694
		if_printf(ifp, "cannot filter more than %d multicast MAC "
3358
		filter->vmf_multicast.nentries = 0;
3695
		    "addresses, falling back to all-multicast mode\n",
3359
		if_printf(ifp, "more than %d multicast MAC addresses "
3360
		    "assigned, falling back to all-multicast mode\n",
3696
		    VTNET_MAX_MAC_ENTRIES);
3361
		    VTNET_MAX_MAC_ENTRIES);
3697
		mcnt = 0;
3362
	} else
3698
	}
3363
		filter->vmf_multicast.nentries = mcnt;
3699
3364
3700
	if (promisc != 0 && allmulti != 0)
3365
	if (promisc != 0 && allmulti != 0)
3701
		goto out;
3366
		goto out;
3702
3367
3703
	filter->vmf_unicast.nentries = vtnet_gtoh32(sc, ucnt);
3704
	filter->vmf_multicast.nentries = vtnet_gtoh32(sc, mcnt);
3705
3706
	hdr.class = VIRTIO_NET_CTRL_MAC;
3368
	hdr.class = VIRTIO_NET_CTRL_MAC;
3707
	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
3369
	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
3708
	ack = VIRTIO_NET_ERR;
3370
	ack = VIRTIO_NET_ERR;
3709
3371
3710
	sglist_init(&sg, nitems(segs), segs);
3372
	sglist_init(&sg, 4, segs);
3373
	error = 0;
3711
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3374
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3712
	error |= sglist_append(&sg, &filter->vmf_unicast,
3375
	error |= sglist_append(&sg, &filter->vmf_unicast,
3713
	    sizeof(uint32_t) + ucnt * ETHER_ADDR_LEN);
3376
	    sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
3714
	error |= sglist_append(&sg, &filter->vmf_multicast,
3377
	error |= sglist_append(&sg, &filter->vmf_multicast,
3715
	    sizeof(uint32_t) + mcnt * ETHER_ADDR_LEN);
3378
	    sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
3716
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3379
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3717
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3380
	KASSERT(error == 0 && sg.sg_nseg == 4,
3381
	    ("%s: error %d adding MAC filter msg to sglist", __func__, error));
3718
3382
3719
	if (error == 0)
3383
	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3720
		vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3384
3721
	if (ack != VIRTIO_NET_OK)
3385
	if (ack != VIRTIO_NET_OK)
3722
		if_printf(ifp, "error setting host MAC filter table\n");
3386
		if_printf(ifp, "error setting host MAC filter table\n");
3723
3387
Lines 3734-3763 Link Here
3734
	struct sglist_seg segs[3];
3398
	struct sglist_seg segs[3];
3735
	struct sglist sg;
3399
	struct sglist sg;
3736
	struct {
3400
	struct {
3737
		struct virtio_net_ctrl_hdr hdr __aligned(2);
3401
		struct virtio_net_ctrl_hdr hdr;
3738
		uint8_t pad1;
3402
		uint8_t pad1;
3739
		uint16_t tag __aligned(2);
3403
		uint16_t tag;
3740
		uint8_t pad2;
3404
		uint8_t pad2;
3741
		uint8_t ack;
3405
		uint8_t ack;
3742
	} s;
3406
	} s __aligned(2);
3743
	int error;
3407
	int error;
3744
3408
3745
	error = 0;
3746
	MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER);
3747
3748
	s.hdr.class = VIRTIO_NET_CTRL_VLAN;
3409
	s.hdr.class = VIRTIO_NET_CTRL_VLAN;
3749
	s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
3410
	s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
3750
	s.tag = vtnet_gtoh16(sc, tag);
3411
	s.tag = tag;
3751
	s.ack = VIRTIO_NET_ERR;
3412
	s.ack = VIRTIO_NET_ERR;
3752
3413
3753
	sglist_init(&sg, nitems(segs), segs);
3414
	sglist_init(&sg, 3, segs);
3415
	error = 0;
3754
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3416
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3755
	error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
3417
	error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
3756
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3418
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3757
	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3419
	KASSERT(error == 0 && sg.sg_nseg == 3,
3420
	    ("%s: error %d adding VLAN message to sglist", __func__, error));
3758
3421
3759
	if (error == 0)
3422
	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3760
		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3761
3423
3762
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3424
	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3763
}
3425
}
Lines 3765-3776 Link Here
3765
static void
3427
static void
3766
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
3428
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
3767
{
3429
{
3768
	int i, bit;
3769
	uint32_t w;
3430
	uint32_t w;
3770
	uint16_t tag;
3431
	uint16_t tag;
3432
	int i, bit;
3771
3433
3772
	MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER);
3773
	VTNET_CORE_LOCK_ASSERT(sc);
3434
	VTNET_CORE_LOCK_ASSERT(sc);
3435
	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
3436
	    ("%s: VLAN_FILTER feature not negotiated", __func__));
3774
3437
3775
	/* Enable the filter for each configured VLAN. */
3438
	/* Enable the filter for each configured VLAN. */
3776
	for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
3439
	for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
Lines 3839-3872 Link Here
3839
	vtnet_update_vlan_filter(arg, 0, tag);
3502
	vtnet_update_vlan_filter(arg, 0, tag);
3840
}
3503
}
3841
3504
3842
static void
3843
vtnet_update_speed_duplex(struct vtnet_softc *sc)
3844
{
3845
	struct ifnet *ifp;
3846
	uint32_t speed;
3847
3848
	ifp = sc->vtnet_ifp;
3849
3850
	if ((sc->vtnet_features & VIRTIO_NET_F_SPEED_DUPLEX) == 0)
3851
		return;
3852
3853
	/* BMV: Ignore duplex. */
3854
	speed = virtio_read_dev_config_4(sc->vtnet_dev,
3855
	    offsetof(struct virtio_net_config, speed));
3856
	if (speed != -1)
3857
		ifp->if_baudrate = IF_Mbps(speed);
3858
}
3859
3860
static int
3505
static int
3861
vtnet_is_link_up(struct vtnet_softc *sc)
3506
vtnet_is_link_up(struct vtnet_softc *sc)
3862
{
3507
{
3508
	device_t dev;
3509
	struct ifnet *ifp;
3863
	uint16_t status;
3510
	uint16_t status;
3864
3511
3865
	if ((sc->vtnet_features & VIRTIO_NET_F_STATUS) == 0)
3512
	dev = sc->vtnet_dev;
3866
		return (1);
3513
	ifp = sc->vtnet_ifp;
3867
3514
3868
	status = virtio_read_dev_config_2(sc->vtnet_dev,
3515
	if ((ifp->if_capabilities & IFCAP_LINKSTATE) == 0)
3869
	    offsetof(struct virtio_net_config, status));
3516
		status = VIRTIO_NET_S_LINK_UP;
3517
	else
3518
		status = virtio_read_dev_config_2(dev,
3519
		    offsetof(struct virtio_net_config, status));
3870
3520
3871
	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
3521
	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
3872
}
3522
}
Lines 3878-3889 Link Here
3878
	int link;
3528
	int link;
3879
3529
3880
	ifp = sc->vtnet_ifp;
3530
	ifp = sc->vtnet_ifp;
3531
3881
	VTNET_CORE_LOCK_ASSERT(sc);
3532
	VTNET_CORE_LOCK_ASSERT(sc);
3882
	link = vtnet_is_link_up(sc);
3533
	link = vtnet_is_link_up(sc);
3883
3534
3884
	/* Notify if the link status has changed. */
3535
	/* Notify if the link status has changed. */
3885
	if (link != 0 && sc->vtnet_link_active == 0) {
3536
	if (link != 0 && sc->vtnet_link_active == 0) {
3886
		vtnet_update_speed_duplex(sc);
3887
		sc->vtnet_link_active = 1;
3537
		sc->vtnet_link_active = 1;
3888
		if_link_state_change(ifp, LINK_STATE_UP);
3538
		if_link_state_change(ifp, LINK_STATE_UP);
3889
	} else if (link == 0 && sc->vtnet_link_active != 0) {
3539
	} else if (link == 0 && sc->vtnet_link_active != 0) {
Lines 3895-3901 Link Here
3895
static int
3545
static int
3896
vtnet_ifmedia_upd(struct ifnet *ifp)
3546
vtnet_ifmedia_upd(struct ifnet *ifp)
3897
{
3547
{
3898
	return (EOPNOTSUPP);
3548
	struct vtnet_softc *sc;
3549
	struct ifmedia *ifm;
3550
3551
	sc = ifp->if_softc;
3552
	ifm = &sc->vtnet_media;
3553
3554
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3555
		return (EINVAL);
3556
3557
	return (0);
3899
}
3558
}
3900
3559
3901
static void
3560
static void
Lines 3911-3955 Link Here
3911
	VTNET_CORE_LOCK(sc);
3570
	VTNET_CORE_LOCK(sc);
3912
	if (vtnet_is_link_up(sc) != 0) {
3571
	if (vtnet_is_link_up(sc) != 0) {
3913
		ifmr->ifm_status |= IFM_ACTIVE;
3572
		ifmr->ifm_status |= IFM_ACTIVE;
3914
		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
3573
		ifmr->ifm_active |= VTNET_MEDIATYPE;
3915
	} else
3574
	} else
3916
		ifmr->ifm_active |= IFM_NONE;
3575
		ifmr->ifm_active |= IFM_NONE;
3917
	VTNET_CORE_UNLOCK(sc);
3576
	VTNET_CORE_UNLOCK(sc);
3918
}
3577
}
3919
3578
3920
static void
3579
static void
3921
vtnet_get_macaddr(struct vtnet_softc *sc)
3580
vtnet_set_hwaddr(struct vtnet_softc *sc)
3922
{
3581
{
3923
3924
	if (sc->vtnet_flags & VTNET_FLAG_MAC) {
3925
		virtio_read_device_config_array(sc->vtnet_dev,
3926
		    offsetof(struct virtio_net_config, mac),
3927
		    &sc->vtnet_hwaddr[0], sizeof(uint8_t), ETHER_ADDR_LEN);
3928
	} else {
3929
		/* Generate a random locally administered unicast address. */
3930
		sc->vtnet_hwaddr[0] = 0xB2;
3931
		arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
3932
	}
3933
}
3934
3935
static void
3936
vtnet_set_macaddr(struct vtnet_softc *sc)
3937
{
3938
	device_t dev;
3582
	device_t dev;
3939
	int error;
3583
	int i;
3940
3584
3941
	dev = sc->vtnet_dev;
3585
	dev = sc->vtnet_dev;
3942
3586
3943
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
3587
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
3944
		error = vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr);
3588
		if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
3945
		if (error)
3946
			device_printf(dev, "unable to set MAC address\n");
3589
			device_printf(dev, "unable to set MAC address\n");
3947
		return;
3590
	} else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
3948
	}
3591
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
3949
3950
	/* MAC in config is read-only in modern VirtIO. */
3951
	if (!vtnet_modern(sc) && sc->vtnet_flags & VTNET_FLAG_MAC) {
3952
		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
3953
			virtio_write_dev_config_1(dev,
3592
			virtio_write_dev_config_1(dev,
3954
			    offsetof(struct virtio_net_config, mac) + i,
3593
			    offsetof(struct virtio_net_config, mac) + i,
3955
			    sc->vtnet_hwaddr[i]);
3594
			    sc->vtnet_hwaddr[i]);
Lines 3958-3969 Link Here
3958
}
3597
}
3959
3598
3960
static void
3599
static void
3961
vtnet_attached_set_macaddr(struct vtnet_softc *sc)
3600
vtnet_get_hwaddr(struct vtnet_softc *sc)
3962
{
3601
{
3602
	device_t dev;
3603
	int i;
3963
3604
3964
	/* Assign MAC address if it was generated. */
3605
	dev = sc->vtnet_dev;
3965
	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0)
3606
3966
		vtnet_set_macaddr(sc);
3607
	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
3608
		/*
3609
		 * Generate a random locally administered unicast address.
3610
		 *
3611
		 * It would be nice to generate the same MAC address across
3612
		 * reboots, but it seems all the hosts currently available
3613
		 * support the MAC feature, so this isn't too important.
3614
		 */
3615
		sc->vtnet_hwaddr[0] = 0xB2;
3616
		arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
3617
		vtnet_set_hwaddr(sc);
3618
		return;
3619
	}
3620
3621
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
3622
		sc->vtnet_hwaddr[i] = virtio_read_dev_config_1(dev,
3623
		    offsetof(struct virtio_net_config, mac) + i);
3624
	}
3967
}
3625
}
3968
3626
3969
static void
3627
static void
Lines 3994-3999 Link Here
3994
}
3652
}
3995
3653
3996
static void
3654
static void
3655
vtnet_set_tx_intr_threshold(struct vtnet_softc *sc)
3656
{
3657
	int size, thresh;
3658
3659
	size = virtqueue_size(sc->vtnet_txqs[0].vtntx_vq);
3660
3661
	/*
3662
	 * The Tx interrupt is disabled until the queue free count falls
3663
	 * below our threshold. Completed frames are drained from the Tx
3664
	 * virtqueue before transmitting new frames and in the watchdog
3665
	 * callout, so the frequency of Tx interrupts is greatly reduced,
3666
	 * at the cost of not freeing mbufs as quickly as they otherwise
3667
	 * would be.
3668
	 *
3669
	 * N.B. We assume all the Tx queues are the same size.
3670
	 */
3671
	thresh = size / 4;
3672
3673
	/*
3674
	 * Without indirect descriptors, leave enough room for the most
3675
	 * segments we handle.
3676
	 */
3677
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
3678
	    thresh < sc->vtnet_tx_nsegs)
3679
		thresh = sc->vtnet_tx_nsegs;
3680
3681
	sc->vtnet_tx_intr_thresh = thresh;
3682
}
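A worked example of the threshold (values illustrative, not from the patch): a 256-entry Tx virtqueue gives thresh = 256 / 4 = 64, so the Tx interrupt stays masked until fewer than 64 descriptors are free; without indirect descriptors a single frame may consume vtnet_tx_nsegs slots, hence the floor.

	/*
	 * Illustrative arithmetic:
	 *   size = 256                -> thresh = 256 / 4 = 64
	 *   !INDIRECT, tx_nsegs = 65  -> thresh = 65 (floor applied)
	 */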
3683
3684
static void
3997
vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
3685
vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
3998
    struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
3686
    struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
3999
{
3687
{
Lines 4021-4028 Link Here
4021
	    &stats->vrxs_csum, "Receive checksum offloaded");
3709
	    &stats->vrxs_csum, "Receive checksum offloaded");
4022
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
3710
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
4023
	    &stats->vrxs_csum_failed, "Receive checksum offload failed");
3711
	    &stats->vrxs_csum_failed, "Receive checksum offload failed");
4024
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro", CTLFLAG_RD,
4025
	    &stats->vrxs_host_lro, "Receive host segmentation offloaded");
4026
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
3712
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
4027
	    &stats->vrxs_rescheduled,
3713
	    &stats->vrxs_rescheduled,
4028
	    "Receive interrupt handler rescheduled");
3714
	    "Receive interrupt handler rescheduled");
Lines 4053-4059 Link Here
4053
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
3739
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
4054
	    &stats->vtxs_csum, "Transmit checksum offloaded");
3740
	    &stats->vtxs_csum, "Transmit checksum offloaded");
4055
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
3741
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
4056
	    &stats->vtxs_tso, "Transmit TCP segmentation offloaded");
3742
	    &stats->vtxs_tso, "Transmit segmentation offloaded");
4057
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
3743
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
4058
	    &stats->vtxs_rescheduled,
3744
	    &stats->vtxs_rescheduled,
4059
	    "Transmit interrupt handler rescheduled");
3745
	    "Transmit interrupt handler rescheduled");
Lines 4073-4079 Link Here
4073
	tree = device_get_sysctl_tree(dev);
3759
	tree = device_get_sysctl_tree(dev);
4074
	child = SYSCTL_CHILDREN(tree);
3760
	child = SYSCTL_CHILDREN(tree);
4075
3761
4076
	for (i = 0; i < sc->vtnet_req_vq_pairs; i++) {
3762
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
4077
		vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
3763
		vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
4078
		vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
3764
		vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
4079
	}
3765
	}
Lines 4133-4152 Link Here
4133
	    CTLFLAG_RD, &stats->rx_task_rescheduled,
3819
	    CTLFLAG_RD, &stats->rx_task_rescheduled,
4134
	    "Times the receive interrupt task rescheduled itself");
3820
	    "Times the receive interrupt task rescheduled itself");
4135
3821
4136
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_unknown_ethtype",
3822
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
4137
	    CTLFLAG_RD, &stats->tx_csum_unknown_ethtype,
3823
	    CTLFLAG_RD, &stats->tx_csum_bad_ethtype,
4138
	    "Aborted transmit of checksum offloaded buffer with unknown "
3824
	    "Aborted transmit of checksum offloaded buffer with unknown "
4139
	    "Ethernet type");
3825
	    "Ethernet type");
4140
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_proto_mismatch",
3826
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
4141
	    CTLFLAG_RD, &stats->tx_csum_proto_mismatch,
3827
	    CTLFLAG_RD, &stats->tx_tso_bad_ethtype,
4142
	    "Aborted transmit of checksum offloaded buffer because mismatched "
3828
	    "Aborted transmit of TSO buffer with unknown Ethernet type");
4143
	    "protocols");
4144
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
3829
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
4145
	    CTLFLAG_RD, &stats->tx_tso_not_tcp,
3830
	    CTLFLAG_RD, &stats->tx_tso_not_tcp,
4146
	    "Aborted transmit of TSO buffer with non TCP protocol");
3831
	    "Aborted transmit of TSO buffer with non TCP protocol");
4147
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_without_csum",
4148
	    CTLFLAG_RD, &stats->tx_tso_without_csum,
4149
	    "Aborted transmit of TSO buffer without TCP checksum offload");
4150
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
3832
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
4151
	    CTLFLAG_RD, &stats->tx_defragged,
3833
	    CTLFLAG_RD, &stats->tx_defragged,
4152
	    "Transmit mbufs defragged");
3834
	    "Transmit mbufs defragged");
Lines 4179-4188 Link Here
4179
3861
4180
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
3862
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
4181
	    CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
3863
	    CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
4182
	    "Number of maximum supported virtqueue pairs");
3864
	    "Maximum number of supported virtqueue pairs");
4183
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "req_vq_pairs",
3865
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "requested_vq_pairs",
4184
	    CTLFLAG_RD, &sc->vtnet_req_vq_pairs, 0,
3866
	    CTLFLAG_RD, &sc->vtnet_requested_vq_pairs, 0,
4185
	    "Number of requested virtqueue pairs");
3867
	    "Requested number of virtqueue pairs");
4186
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
3868
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
4187
	    CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
3869
	    CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
4188
	    "Number of active virtqueue pairs");
3870
	    "Number of active virtqueue pairs");
Lines 4190-4208 Link Here
4190
	vtnet_setup_stat_sysctl(ctx, child, sc);
3872
	vtnet_setup_stat_sysctl(ctx, child, sc);
4191
}
3873
}
4192
3874
4193
static void
4194
vtnet_load_tunables(struct vtnet_softc *sc)
4195
{
4196
4197
	sc->vtnet_lro_entry_count = vtnet_tunable_int(sc,
4198
	    "lro_entry_count", vtnet_lro_entry_count);
4199
	if (sc->vtnet_lro_entry_count < TCP_LRO_ENTRIES)
4200
		sc->vtnet_lro_entry_count = TCP_LRO_ENTRIES;
4201
4202
	sc->vtnet_lro_mbufq_depth = vtnet_tunable_int(sc,
4203
	    "lro_mbufq_depeth", vtnet_lro_mbufq_depth);
4204
}
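vtnet_tunable_int() itself is outside this hunk; it follows the usual per-device tunable pattern, roughly (a sketch, names assumed from context):

	static int
	vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def)
	{
		char path[64];

		snprintf(path, sizeof(path), "hw.vtnet.%d.%s",
		    device_get_unit(sc->vtnet_dev), knob);
		TUNABLE_INT_FETCH(path, &def);	/* per-unit override, if set */

		return (def);
	}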
4205
4206
static int
3875
static int
4207
vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
3876
vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
4208
{
3877
{
Lines 4244-4257 Link Here
4244
static void
3913
static void
4245
vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
3914
vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
4246
{
3915
{
4247
	struct vtnet_rxq *rxq;
4248
	int i;
3916
	int i;
4249
3917
4250
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
3918
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4251
		rxq = &sc->vtnet_rxqs[i];
3919
		vtnet_rxq_enable_intr(&sc->vtnet_rxqs[i]);
4252
		if (vtnet_rxq_enable_intr(rxq) != 0)
4253
			taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
4254
	}
4255
}
3920
}
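A note on the contract the reworked loop relies on (assumption, based on how virtqueue(9) is used elsewhere in the tree):

	/*
	 * vtnet_rxq_enable_intr() returns nonzero when completed entries
	 * were already pending at enable time; the caller must drain them
	 * itself, here by kicking the Rx taskqueue, since no further
	 * notification will arrive for those entries.
	 */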
4256
3921
4257
static void
3922
static void
Lines 4276-4282 Link Here
4276
{
3941
{
4277
	int i;
3942
	int i;
4278
3943
4279
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
3944
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4280
		vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
3945
		vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
4281
}
3946
}
4282
3947
Lines 4285-4291 Link Here
4285
{
3950
{
4286
	int i;
3951
	int i;
4287
3952
4288
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
3953
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4289
		vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
3954
		vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
4290
}
3955
}
4291
3956
Lines 4318-4326 Link Here
4318
	sc = if_getsoftc(ifp);
3983
	sc = if_getsoftc(ifp);
4319
3984
4320
	VTNET_CORE_LOCK(sc);
3985
	VTNET_CORE_LOCK(sc);
4321
	*nrxr = sc->vtnet_req_vq_pairs;
3986
	*nrxr = sc->vtnet_max_vq_pairs;
4322
	*ncl = NETDUMP_MAX_IN_FLIGHT;
3987
	*ncl = NETDUMP_MAX_IN_FLIGHT;
4323
	*clsize = sc->vtnet_rx_clustersz;
3988
	*clsize = sc->vtnet_rx_clsize;
4324
	VTNET_CORE_UNLOCK(sc);
3989
	VTNET_CORE_UNLOCK(sc);
4325
3990
4326
	/*
3991
	/*
Lines 4369-4375 Link Here
4369
		return (EBUSY);
4034
		return (EBUSY);
4370
4035
4371
	(void)vtnet_txq_eof(&sc->vtnet_txqs[0]);
4036
	(void)vtnet_txq_eof(&sc->vtnet_txqs[0]);
4372
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4037
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
4373
		(void)vtnet_rxq_eof(&sc->vtnet_rxqs[i]);
4038
		(void)vtnet_rxq_eof(&sc->vtnet_rxqs[i]);
4374
	return (0);
4039
	return (0);
4375
}
4040
}
(-)sys/dev/virtio.ori/network/if_vtnetvar.h (-73 / +51 lines)
Lines 43-52 Link Here
43
	uint64_t	rx_csum_bad_ipproto;
43
	uint64_t	rx_csum_bad_ipproto;
44
	uint64_t	rx_csum_bad_offset;
44
	uint64_t	rx_csum_bad_offset;
45
	uint64_t	rx_csum_bad_proto;
45
	uint64_t	rx_csum_bad_proto;
46
	uint64_t	tx_csum_unknown_ethtype;
46
	uint64_t	tx_csum_bad_ethtype;
47
	uint64_t	tx_csum_proto_mismatch;
47
	uint64_t	tx_tso_bad_ethtype;
48
	uint64_t	tx_tso_not_tcp;
48
	uint64_t	tx_tso_not_tcp;
49
	uint64_t	tx_tso_without_csum;
50
	uint64_t	tx_defragged;
49
	uint64_t	tx_defragged;
51
	uint64_t	tx_defrag_failed;
50
	uint64_t	tx_defrag_failed;
52
51
Lines 68-74 Link Here
68
	uint64_t	vrxs_ierrors;	/* if_ierrors */
67
	uint64_t	vrxs_ierrors;	/* if_ierrors */
69
	uint64_t	vrxs_csum;
68
	uint64_t	vrxs_csum;
70
	uint64_t	vrxs_csum_failed;
69
	uint64_t	vrxs_csum_failed;
71
	uint64_t	vrxs_host_lro;
72
	uint64_t	vrxs_rescheduled;
70
	uint64_t	vrxs_rescheduled;
73
};
71
};
74
72
Lines 81-87 Link Here
81
	struct vtnet_rxq_stats	 vtnrx_stats;
79
	struct vtnet_rxq_stats	 vtnrx_stats;
82
	struct taskqueue	*vtnrx_tq;
80
	struct taskqueue	*vtnrx_tq;
83
	struct task		 vtnrx_intrtask;
81
	struct task		 vtnrx_intrtask;
84
	struct lro_ctrl		 vtnrx_lro;
85
#ifdef DEV_NETMAP
82
#ifdef DEV_NETMAP
86
	struct virtio_net_hdr_mrg_rxbuf vtnrx_shrhdr;
83
	struct virtio_net_hdr_mrg_rxbuf vtnrx_shrhdr;
87
#endif  /* DEV_NETMAP */
84
#endif  /* DEV_NETMAP */
Lines 114-120 Link Here
114
#endif
111
#endif
115
	int			 vtntx_id;
112
	int			 vtntx_id;
116
	int			 vtntx_watchdog;
113
	int			 vtntx_watchdog;
117
	int			 vtntx_intr_threshold;
118
	struct vtnet_txq_stats	 vtntx_stats;
114
	struct vtnet_txq_stats	 vtntx_stats;
119
	struct taskqueue	*vtntx_tq;
115
	struct taskqueue	*vtntx_tq;
120
	struct task		 vtntx_intrtask;
116
	struct task		 vtntx_intrtask;
Lines 140-149 Link Here
140
	struct ifnet		*vtnet_ifp;
136
	struct ifnet		*vtnet_ifp;
141
	struct vtnet_rxq	*vtnet_rxqs;
137
	struct vtnet_rxq	*vtnet_rxqs;
142
	struct vtnet_txq	*vtnet_txqs;
138
	struct vtnet_txq	*vtnet_txqs;
143
	uint64_t		 vtnet_features;
144
139
145
	uint32_t		 vtnet_flags;
140
	uint32_t		 vtnet_flags;
146
#define VTNET_FLAG_MODERN	 0x0001
141
#define VTNET_FLAG_SUSPENDED	 0x0001
147
#define VTNET_FLAG_MAC		 0x0002
142
#define VTNET_FLAG_MAC		 0x0002
148
#define VTNET_FLAG_CTRL_VQ	 0x0004
143
#define VTNET_FLAG_CTRL_VQ	 0x0004
149
#define VTNET_FLAG_CTRL_RX	 0x0008
144
#define VTNET_FLAG_CTRL_RX	 0x0008
Lines 152-184 Link Here
152
#define VTNET_FLAG_TSO_ECN	 0x0040
147
#define VTNET_FLAG_TSO_ECN	 0x0040
153
#define VTNET_FLAG_MRG_RXBUFS	 0x0080
148
#define VTNET_FLAG_MRG_RXBUFS	 0x0080
154
#define VTNET_FLAG_LRO_NOMRG	 0x0100
149
#define VTNET_FLAG_LRO_NOMRG	 0x0100
155
#define VTNET_FLAG_MQ		 0x0200
150
#define VTNET_FLAG_MULTIQ	 0x0200
156
#define VTNET_FLAG_INDIRECT	 0x0400
151
#define VTNET_FLAG_INDIRECT	 0x0400
157
#define VTNET_FLAG_EVENT_IDX	 0x0800
152
#define VTNET_FLAG_EVENT_IDX	 0x0800
158
#define VTNET_FLAG_SUSPENDED	 0x1000
159
#define VTNET_FLAG_FIXUP_NEEDS_CSUM 0x2000
160
#define VTNET_FLAG_SW_LRO	 0x4000
161
153
154
	int			 vtnet_link_active;
162
	int			 vtnet_hdr_size;
155
	int			 vtnet_hdr_size;
163
	int			 vtnet_rx_nmbufs;
164
	int			 vtnet_rx_clustersz;
165
	int			 vtnet_rx_nsegs;
166
	int			 vtnet_rx_process_limit;
156
	int			 vtnet_rx_process_limit;
167
	int			 vtnet_link_active;
157
	int			 vtnet_rx_nsegs;
168
	int			 vtnet_act_vq_pairs;
158
	int			 vtnet_rx_nmbufs;
169
	int			 vtnet_req_vq_pairs;
159
	int			 vtnet_rx_clsize;
170
	int			 vtnet_max_vq_pairs;
160
	int			 vtnet_rx_new_clsize;
161
	int			 vtnet_tx_intr_thresh;
171
	int			 vtnet_tx_nsegs;
162
	int			 vtnet_tx_nsegs;
172
	int			 vtnet_if_flags;
163
	int			 vtnet_if_flags;
173
	int			 vtnet_max_mtu;
164
	int			 vtnet_act_vq_pairs;
174
	int			 vtnet_lro_entry_count;
165
	int			 vtnet_max_vq_pairs;
175
	int			 vtnet_lro_mbufq_depth;
166
	int			 vtnet_requested_vq_pairs;
176
167
177
	struct virtqueue	*vtnet_ctrl_vq;
168
	struct virtqueue	*vtnet_ctrl_vq;
178
	struct vtnet_mac_filter	*vtnet_mac_filter;
169
	struct vtnet_mac_filter	*vtnet_mac_filter;
179
	uint32_t		*vtnet_vlan_filter;
170
	uint32_t		*vtnet_vlan_filter;
180
171
181
	uint64_t		 vtnet_negotiated_features;
172
	uint64_t		 vtnet_features;
182
	struct vtnet_statistics	 vtnet_stats;
173
	struct vtnet_statistics	 vtnet_stats;
183
	struct callout		 vtnet_tick_ch;
174
	struct callout		 vtnet_tick_ch;
184
	struct ifmedia		 vtnet_media;
175
	struct ifmedia		 vtnet_media;
Lines 190-211 Link Here
190
	char			 vtnet_hwaddr[ETHER_ADDR_LEN];
181
	char			 vtnet_hwaddr[ETHER_ADDR_LEN];
191
};
182
};
192
183
193
static bool
194
vtnet_modern(struct vtnet_softc *sc)
195
{
196
	return ((sc->vtnet_flags & VTNET_FLAG_MODERN) != 0);
197
}
198
199
static bool
200
vtnet_software_lro(struct vtnet_softc *sc)
201
{
202
	return ((sc->vtnet_flags & VTNET_FLAG_SW_LRO) != 0);
203
}
204
205
/*
184
/*
206
 * Maximum number of queue pairs we will autoconfigure to.
185
 * Maximum number of queue pairs we will autoconfigure to.
207
 */
186
 */
208
#define VTNET_MAX_QUEUE_PAIRS	32
187
#define VTNET_MAX_QUEUE_PAIRS	8
209
188
210
/*
189
/*
211
 * Additional completed entries can appear in a virtqueue before we can
190
 * Additional completed entries can appear in a virtqueue before we can
Lines 223-246 Link Here
223
#define VTNET_NOTIFY_RETRIES		4
202
#define VTNET_NOTIFY_RETRIES		4
224
203
225
/*
204
/*
205
 * Fake the media type. The host does not provide us with any real media
206
 * information.
207
 */
208
#define VTNET_MEDIATYPE		 (IFM_ETHER | IFM_10G_T | IFM_FDX)
209
210
/*
226
 * Number of words to allocate for the VLAN shadow table. There is one
211
 * Number of words to allocate for the VLAN shadow table. There is one
227
 * bit for each VLAN.
212
 * bit for each VLAN.
228
 */
213
 */
229
#define VTNET_VLAN_FILTER_NWORDS	(4096 / 32)
214
#define VTNET_VLAN_FILTER_NWORDS	(4096 / 32)
230
215
231
/* We depend on these being the same size (and same layout). */
232
CTASSERT(sizeof(struct virtio_net_hdr_mrg_rxbuf) ==
233
    sizeof(struct virtio_net_hdr_v1));
234
235
/*
216
/*
236
 * In legacy VirtIO when mergeable buffers are not negotiated, this structure
217
 * When mergeable buffers are not negotiated, the vtnet_rx_header structure
237
 * is placed at the beginning of the mbuf data. Use 4 bytes of pad to keep
218
 * below is placed at the beginning of the mbuf data. Use 4 bytes of pad to
238
 * both the VirtIO header and the data non-contiguous and the frame's payload
219
 * both keep the VirtIO header and the data non-contiguous and to keep the
239
 * 4 byte aligned. Note this padding would not be necessary if the
220
 * frame's payload 4 byte aligned.
240
 * VIRTIO_F_ANY_LAYOUT feature was negotiated (but we don't support that yet).
241
 *
221
 *
242
 * In modern VirtIO or when mergeable buffers are negotiated, the host puts
222
 * When mergeable buffers are negotiated, the host puts the VirtIO header in
243
 * the VirtIO header at the beginning of the first mbuf's data.
223
 * the beginning of the first mbuf's data.
244
 */
224
 */
245
#define VTNET_RX_HEADER_PAD	4
225
#define VTNET_RX_HEADER_PAD	4
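One way to see the alignment claim (an illustrative check, not part of the patch): the 10-byte legacy virtio_net_hdr plus 4 bytes of pad put the 14-byte Ethernet header at offset 14, leaving the IP header 4-byte aligned at offset 28.

	/* Illustrative compile-time restatement of the comment above. */
	CTASSERT((VTNET_RX_HEADER_PAD + sizeof(struct virtio_net_hdr) +
	    ETHER_HDR_LEN) % 4 == 0);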
246
struct vtnet_rx_header {
226
struct vtnet_rx_header {
Lines 256-262 Link Here
256
	union {
236
	union {
257
		struct virtio_net_hdr		hdr;
237
		struct virtio_net_hdr		hdr;
258
		struct virtio_net_hdr_mrg_rxbuf	mhdr;
238
		struct virtio_net_hdr_mrg_rxbuf	mhdr;
259
		struct virtio_net_hdr_v1	v1hdr;
260
	} vth_uhdr;
239
	} vth_uhdr;
261
240
262
	struct mbuf *vth_mbuf;
241
	struct mbuf *vth_mbuf;
Lines 271-281 Link Here
271
 */
250
 */
272
#define VTNET_MAX_MAC_ENTRIES	128
251
#define VTNET_MAX_MAC_ENTRIES	128
273
252
274
/*
275
 * The driver version of struct virtio_net_ctrl_mac but with our predefined
276
 * number of MAC addresses allocated. This structure is shared with the host,
277
 * so the nentries field is in the correct VirtIO endianness.
278
 */
279
struct vtnet_mac_table {
253
struct vtnet_mac_table {
280
	uint32_t	nentries;
254
	uint32_t	nentries;
281
	uint8_t		macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
255
	uint8_t		macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
Lines 301-316 Link Here
301
    (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6 | CSUM_TSO)
275
    (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6 | CSUM_TSO)
302
276
303
/* Features desired/implemented by this driver. */
277
/* Features desired/implemented by this driver. */
304
#define VTNET_COMMON_FEATURES \
278
#define VTNET_FEATURES \
305
    (VIRTIO_NET_F_MAC			| \
279
    (VIRTIO_NET_F_MAC			| \
306
     VIRTIO_NET_F_STATUS		| \
280
     VIRTIO_NET_F_STATUS		| \
307
     VIRTIO_NET_F_CTRL_GUEST_OFFLOADS	| \
308
     VIRTIO_NET_F_MTU			| \
309
     VIRTIO_NET_F_CTRL_VQ		| \
281
     VIRTIO_NET_F_CTRL_VQ		| \
310
     VIRTIO_NET_F_CTRL_RX		| \
282
     VIRTIO_NET_F_CTRL_RX		| \
311
     VIRTIO_NET_F_CTRL_MAC_ADDR		| \
283
     VIRTIO_NET_F_CTRL_MAC_ADDR		| \
312
     VIRTIO_NET_F_CTRL_VLAN		| \
284
     VIRTIO_NET_F_CTRL_VLAN		| \
313
     VIRTIO_NET_F_CSUM			| \
285
     VIRTIO_NET_F_CSUM			| \
286
     VIRTIO_NET_F_GSO			| \
314
     VIRTIO_NET_F_HOST_TSO4		| \
287
     VIRTIO_NET_F_HOST_TSO4		| \
315
     VIRTIO_NET_F_HOST_TSO6		| \
288
     VIRTIO_NET_F_HOST_TSO6		| \
316
     VIRTIO_NET_F_HOST_ECN		| \
289
     VIRTIO_NET_F_HOST_ECN		| \
Lines 320-332 Link Here
320
     VIRTIO_NET_F_GUEST_ECN		| \
293
     VIRTIO_NET_F_GUEST_ECN		| \
321
     VIRTIO_NET_F_MRG_RXBUF		| \
294
     VIRTIO_NET_F_MRG_RXBUF		| \
322
     VIRTIO_NET_F_MQ			| \
295
     VIRTIO_NET_F_MQ			| \
323
     VIRTIO_NET_F_SPEED_DUPLEX		| \
324
     VIRTIO_RING_F_EVENT_IDX		| \
296
     VIRTIO_RING_F_EVENT_IDX		| \
325
     VIRTIO_RING_F_INDIRECT_DESC)
297
     VIRTIO_RING_F_INDIRECT_DESC)
326
298
327
#define VTNET_MODERN_FEATURES (VTNET_COMMON_FEATURES)
328
#define VTNET_LEGACY_FEATURES (VTNET_COMMON_FEATURES | VIRTIO_NET_F_GSO)
329
330
/*
299
/*
331
 * The VIRTIO_NET_F_HOST_TSO[46] features permit us to send the host
300
 * The VIRTIO_NET_F_HOST_TSO[46] features permit us to send the host
332
 * frames larger than 1514 bytes.
301
 * frames larger than 1514 bytes.
Lines 336-374 Link Here
336
305
337
/*
306
/*
338
 * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
307
 * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
339
 * frames larger than 1514 bytes.
308
 * frames larger than 1514 bytes. We do not yet support software LRO
340
309
 * via tcp_lro_rx().
341
 */
310
 */
342
#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
311
#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
343
    VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)
312
    VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)
344
313
345
#define VTNET_MIN_MTU		68
346
#define VTNET_MAX_MTU		65536
314
#define VTNET_MAX_MTU		65536
347
#define VTNET_MAX_RX_SIZE	65550
315
#define VTNET_MAX_RX_SIZE	65550
348
316
349
/*
317
/*
350
 * Used to preallocate the VQ indirect descriptors. Modern and mergeable
318
 * Used to preallocate the Vq indirect descriptors. The first segment
351
 * buffers do not require one segment for the VirtIO header since it is
319
 * is reserved for the header, except for mergeable buffers since the
352
 * placed inline at the beginning of the receive buffer.
320
 * header is placed inline with the data.
353
 */
321
 */
354
#define VTNET_RX_SEGS_HDR_INLINE	1
322
#define VTNET_MRG_RX_SEGS	1
355
#define VTNET_RX_SEGS_HDR_SEPARATE	2
323
#define VTNET_MIN_RX_SEGS	2
356
#define VTNET_RX_SEGS_LRO_NOMRG		34
324
#define VTNET_MAX_RX_SEGS	34
357
#define VTNET_TX_SEGS_MIN		32
325
#define VTNET_MIN_TX_SEGS	32
358
#define VTNET_TX_SEGS_MAX		64
326
#define VTNET_MAX_TX_SEGS	64
359
327
360
/*
328
/*
361
 * Assert we can receive and transmit the maximum with regular
329
 * Assert we can receive and transmit the maximum with regular
362
 * size clusters.
330
 * size clusters.
363
 */
331
 */
364
CTASSERT(((VTNET_RX_SEGS_LRO_NOMRG - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
332
CTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
365
CTASSERT(((VTNET_TX_SEGS_MAX - 1) * MCLBYTES) >= VTNET_MAX_MTU);
333
CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU);
366
334
367
/*
335
/*
368
 * Number of slots in the Tx bufrings. This value matches most other
336
 * Number of slots in the Tx bufrings. This value matches most other
369
 * multiqueue drivers.
337
 * multiqueue drivers.
370
 */
338
 */
371
#define VTNET_DEFAULT_BUFRING_SIZE	4096
339
#define VTNET_DEFAULT_BUFRING_SIZE	4096
340
341
/*
342
 * Determine how many mbufs are in each receive buffer. For LRO without
343
 * mergeable buffers, we must allocate an mbuf chain large enough to
344
 * hold both the vtnet_rx_header and the maximum receivable data.
345
 */
346
#define VTNET_NEEDED_RX_MBUFS(_sc, _clsize)				\
347
	((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 1 :		\
348
	    howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE,	\
349
	        (_clsize))
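A worked example (numbers illustrative, assuming 2K clusters and a 20-byte vtnet_rx_header): howmany(20 + 65550, 2048) = 33 mbufs per LRO_NOMRG receive buffer; adding the separate header segment gives the VTNET_MAX_RX_SEGS budget of 34 asserted above.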
372
350
373
#define VTNET_CORE_MTX(_sc)		&(_sc)->vtnet_mtx
351
#define VTNET_CORE_MTX(_sc)		&(_sc)->vtnet_mtx
374
#define VTNET_CORE_LOCK(_sc)		mtx_lock(VTNET_CORE_MTX((_sc)))
352
#define VTNET_CORE_LOCK(_sc)		mtx_lock(VTNET_CORE_MTX((_sc)))
(-)sys/dev/virtio.ori/network/virtio_net.h (-78 / +25 lines)
Lines 34-66 Link Here
34
#define _VIRTIO_NET_H
34
#define _VIRTIO_NET_H
35
35
36
/* The feature bitmap for virtio net */
36
/* The feature bitmap for virtio net */
37
#define VIRTIO_NET_F_CSUM		 0x000001 /* Host handles pkts w/ partial csum */
37
#define VIRTIO_NET_F_CSUM	0x00001 /* Host handles pkts w/ partial csum */
38
#define VIRTIO_NET_F_GUEST_CSUM		 0x000002 /* Guest handles pkts w/ partial csum*/
38
#define VIRTIO_NET_F_GUEST_CSUM 0x00002 /* Guest handles pkts w/ partial csum*/
39
#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 0x000004 /* Dynamic offload configuration. */
39
#define VIRTIO_NET_F_MAC	0x00020 /* Host has given MAC address. */
40
#define VIRTIO_NET_F_MTU		 0x000008 /* Initial MTU advice */
40
#define VIRTIO_NET_F_GSO	0x00040 /* Host handles pkts w/ any GSO type */
41
#define VIRTIO_NET_F_MAC		 0x000020 /* Host has given MAC address. */
41
#define VIRTIO_NET_F_GUEST_TSO4	0x00080 /* Guest can handle TSOv4 in. */
42
#define VIRTIO_NET_F_GSO		 0x000040 /* Host handles pkts w/ any GSO type */
42
#define VIRTIO_NET_F_GUEST_TSO6	0x00100 /* Guest can handle TSOv6 in. */
43
#define VIRTIO_NET_F_GUEST_TSO4		 0x000080 /* Guest can handle TSOv4 in. */
43
#define VIRTIO_NET_F_GUEST_ECN	0x00200 /* Guest can handle TSO[6] w/ ECN in.*/
44
#define VIRTIO_NET_F_GUEST_TSO6		 0x000100 /* Guest can handle TSOv6 in. */
44
#define VIRTIO_NET_F_GUEST_UFO	0x00400 /* Guest can handle UFO in. */
45
#define VIRTIO_NET_F_GUEST_ECN		 0x000200 /* Guest can handle TSO[6] w/ ECN in. */
45
#define VIRTIO_NET_F_HOST_TSO4	0x00800 /* Host can handle TSOv4 in. */
46
#define VIRTIO_NET_F_GUEST_UFO		 0x000400 /* Guest can handle UFO in. */
46
#define VIRTIO_NET_F_HOST_TSO6	0x01000 /* Host can handle TSOv6 in. */
47
#define VIRTIO_NET_F_HOST_TSO4		 0x000800 /* Host can handle TSOv4 in. */
47
#define VIRTIO_NET_F_HOST_ECN	0x02000 /* Host can handle TSO[6] w/ ECN in. */
48
#define VIRTIO_NET_F_HOST_TSO6		 0x001000 /* Host can handle TSOv6 in. */
48
#define VIRTIO_NET_F_HOST_UFO	0x04000 /* Host can handle UFO in. */
49
#define VIRTIO_NET_F_HOST_ECN		 0x002000 /* Host can handle TSO[6] w/ ECN in. */
49
#define VIRTIO_NET_F_MRG_RXBUF	0x08000 /* Host can merge receive buffers. */
50
#define VIRTIO_NET_F_HOST_UFO		 0x004000 /* Host can handle UFO in. */
50
#define VIRTIO_NET_F_STATUS	0x10000 /* virtio_net_config.status available*/
51
#define VIRTIO_NET_F_MRG_RXBUF		 0x008000 /* Host can merge receive buffers. */
51
#define VIRTIO_NET_F_CTRL_VQ	0x20000 /* Control channel available */
52
#define VIRTIO_NET_F_STATUS		 0x010000 /* virtio_net_config.status available */
52
#define VIRTIO_NET_F_CTRL_RX	0x40000 /* Control channel RX mode support */
53
#define VIRTIO_NET_F_CTRL_VQ		 0x020000 /* Control channel available */
53
#define VIRTIO_NET_F_CTRL_VLAN	0x80000 /* Control channel VLAN filtering */
54
#define VIRTIO_NET_F_CTRL_RX		 0x040000 /* Control channel RX mode support */
54
#define VIRTIO_NET_F_CTRL_RX_EXTRA 0x100000 /* Extra RX mode control support */
55
#define VIRTIO_NET_F_CTRL_VLAN		 0x080000 /* Control channel VLAN filtering */
55
#define VIRTIO_NET_F_GUEST_ANNOUNCE 0x200000 /* Announce device on network */
56
#define VIRTIO_NET_F_CTRL_RX_EXTRA	 0x100000 /* Extra RX mode control support */
56
#define VIRTIO_NET_F_MQ		0x400000 /* Device supports RFS */
57
#define VIRTIO_NET_F_GUEST_ANNOUNCE	 0x200000 /* Announce device on network */
57
#define VIRTIO_NET_F_CTRL_MAC_ADDR 0x800000 /* Set MAC address */
58
#define VIRTIO_NET_F_MQ			 0x400000 /* Device supports Receive Flow Steering */
59
#define VIRTIO_NET_F_CTRL_MAC_ADDR	 0x800000 /* Set MAC address */
60
#define VIRTIO_NET_F_SPEED_DUPLEX	 (1ULL << 63) /* Device set linkspeed and duplex */
61
58
62
#define VIRTIO_NET_S_LINK_UP	1	/* Link is up */
59
#define VIRTIO_NET_S_LINK_UP	1	/* Link is up */
63
#define VIRTIO_NET_S_ANNOUNCE	2	/* Announcement is needed */
64
60
65
struct virtio_net_config {
61
struct virtio_net_config {
66
	/* The config defining mac address (if VIRTIO_NET_F_MAC) */
62
	/* The config defining mac address (if VIRTIO_NET_F_MAC) */
Lines 72-103 Link Here
72
	 * Legal values are between 1 and 0x8000.
68
	 * Legal values are between 1 and 0x8000.
73
	 */
69
	 */
74
	uint16_t	max_virtqueue_pairs;
70
	uint16_t	max_virtqueue_pairs;
75
	/* Default maximum transmit unit advice */
76
	uint16_t	mtu;
77
	/*
78
	 * speed, in units of 1Mb. All values 0 to INT_MAX are legal.
79
	 * Any other value stands for unknown.
80
	 */
81
	uint32_t	speed;
82
	/*
83
	 * 0x00 - half duplex
84
	 * 0x01 - full duplex
85
	 * Any other value stands for unknown.
86
	 */
87
	uint8_t		duplex;
88
} __packed;
71
} __packed;
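Sketch (not part of the patch): how a driver might consume the new fields once VIRTIO_NET_F_SPEED_DUPLEX is negotiated; the name example_read_linkspeed is hypothetical.

	static uint32_t
	example_read_linkspeed(device_t dev)
	{
		uint32_t speed;

		speed = virtio_read_dev_config_4(dev,
		    offsetof(struct virtio_net_config, speed));
		/* 0..INT_MAX are valid Mb/s values; the rest mean unknown. */
		return (speed <= INT_MAX ? speed : 0);
	}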
89
72
90
/*
73
/*
91
 * This header comes first in the scatter-gather list.  If you don't
74
 * This is the first element of the scatter-gather list.  If you don't
92
 * specify GSO or CSUM features, you can simply ignore the header.
75
 * specify GSO or CSUM features, you can simply ignore the header.
93
 *
94
 * This is bitwise-equivalent to the legacy struct virtio_net_hdr_mrg_rxbuf,
95
 * only flattened.
96
 */
76
 */
97
struct virtio_net_hdr_v1 {
77
struct virtio_net_hdr {
98
#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* Use csum_start, csum_offset */
78
#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* Use csum_start,csum_offset*/
99
#define VIRTIO_NET_HDR_F_DATA_VALID	2	/* Csum is valid */
79
#define VIRTIO_NET_HDR_F_DATA_VALID	2	/* Csum is valid */
100
	uint8_t flags;
80
	uint8_t	flags;
101
#define VIRTIO_NET_HDR_GSO_NONE		0	/* Not a GSO frame */
81
#define VIRTIO_NET_HDR_GSO_NONE		0	/* Not a GSO frame */
102
#define VIRTIO_NET_HDR_GSO_TCPV4	1	/* GSO frame, IPv4 TCP (TSO) */
82
#define VIRTIO_NET_HDR_GSO_TCPV4	1	/* GSO frame, IPv4 TCP (TSO) */
103
#define VIRTIO_NET_HDR_GSO_UDP		3	/* GSO frame, IPv4 UDP (UFO) */
83
#define VIRTIO_NET_HDR_GSO_UDP		3	/* GSO frame, IPv4 UDP (UFO) */
Lines 108-134 Link Here
108
	uint16_t gso_size;	/* Bytes to append to hdr_len per frame */
88
	uint16_t gso_size;	/* Bytes to append to hdr_len per frame */
109
	uint16_t csum_start;	/* Position to start checksumming from */
89
	uint16_t csum_start;	/* Position to start checksumming from */
110
	uint16_t csum_offset;	/* Offset after that to place checksum */
90
	uint16_t csum_offset;	/* Offset after that to place checksum */
111
	uint16_t num_buffers;	/* Number of merged rx buffers */
112
};
91
};
113
92
114
/*
93
/*
115
 * This header comes first in the scatter-gather list.
116
 * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, it must
117
 * be the first element of the scatter-gather list.  If you don't
118
 * specify GSO or CSUM features, you can simply ignore the header.
119
 */
120
struct virtio_net_hdr {
121
	/* See VIRTIO_NET_HDR_F_* */
122
	uint8_t	flags;
123
	/* See VIRTIO_NET_HDR_GSO_* */
124
	uint8_t gso_type;
125
	uint16_t hdr_len;	/* Ethernet + IP + tcp/udp hdrs */
126
	uint16_t gso_size;	/* Bytes to append to hdr_len per frame */
127
	uint16_t csum_start;	/* Position to start checksumming from */
128
	uint16_t csum_offset;	/* Offset after that to place checksum */
129
};
130
131
/*
132
 * This is the version of the header to use when the MRG_RXBUF
94
 * This is the version of the header to use when the MRG_RXBUF
133
 * feature has been negotiated.
95
 * feature has been negotiated.
134
 */
96
 */
Lines 238-257 Link Here
238
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET		0
200
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET		0
239
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN		1
201
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN		1
240
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX		0x8000
202
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX		0x8000
241
242
/*
243
 * Control network offloads
244
 *
245
 * Reconfigures the network offloads that Guest can handle.
246
 *
247
 * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit.
248
 *
249
 * Command data format matches the feature bit mask exactly.
250
 *
251
 * See VIRTIO_NET_F_GUEST_* for the list of offloads
252
 * that can be enabled/disabled.
253
 */
254
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS		5
255
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET	0
256
203
257
#endif /* _VIRTIO_NET_H */
204
#endif /* _VIRTIO_NET_H */
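Usage sketch for the new control command (illustrative): the payload of VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET is simply the desired offload bit mask, sent in VirtIO endianness over the control virtqueue.

	/* e.g. enable guest checksum and TSOv4, disabling all other offloads */
	uint64_t offloads = VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4;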
(-)sys/dev/virtio.ori/pci/virtio_pci.c (-508 / +838 lines)
Lines 1-7 Link Here
1
/*-
1
/*-
2
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
2
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3
 *
3
 *
4
 * Copyright (c) 2017, Bryan Venteicher <bryanv@FreeBSD.org>
4
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
5
 * All rights reserved.
5
 * All rights reserved.
6
 *
6
 *
7
 * Redistribution and use in source and binary forms, with or without
7
 * Redistribution and use in source and binary forms, with or without
Lines 35-42 Link Here
35
#include <sys/systm.h>
35
#include <sys/systm.h>
36
#include <sys/bus.h>
36
#include <sys/bus.h>
37
#include <sys/kernel.h>
37
#include <sys/kernel.h>
38
#include <sys/sbuf.h>
39
#include <sys/sysctl.h>
40
#include <sys/module.h>
38
#include <sys/module.h>
41
#include <sys/malloc.h>
39
#include <sys/malloc.h>
42
40
Lines 51-291 Link Here
51
#include <dev/virtio/virtio.h>
49
#include <dev/virtio/virtio.h>
52
#include <dev/virtio/virtqueue.h>
50
#include <dev/virtio/virtqueue.h>
53
#include <dev/virtio/pci/virtio_pci.h>
51
#include <dev/virtio/pci/virtio_pci.h>
54
#include <dev/virtio/pci/virtio_pci_var.h>
55
52
56
#include "virtio_pci_if.h"
53
#include "virtio_bus_if.h"
57
#include "virtio_if.h"
54
#include "virtio_if.h"
58
55
59
static void	vtpci_describe_features(struct vtpci_common *, const char *,
56
struct vtpci_interrupt {
57
	struct resource		*vti_irq;
58
	int			 vti_rid;
59
	void			*vti_handler;
60
};
61
62
struct vtpci_virtqueue {
63
	struct virtqueue	*vtv_vq;
64
	int			 vtv_no_intr;
65
};
66
67
struct vtpci_softc {
68
	device_t			 vtpci_dev;
69
	struct resource			*vtpci_res;
70
	struct resource			*vtpci_msix_res;
71
	uint64_t			 vtpci_features;
72
	uint32_t			 vtpci_flags;
73
#define VTPCI_FLAG_NO_MSI		0x0001
74
#define VTPCI_FLAG_NO_MSIX		0x0002
75
#define VTPCI_FLAG_LEGACY		0x1000
76
#define VTPCI_FLAG_MSI			0x2000
77
#define VTPCI_FLAG_MSIX			0x4000
78
#define VTPCI_FLAG_SHARED_MSIX		0x8000
79
#define VTPCI_FLAG_ITYPE_MASK		0xF000
80
81
	/* This "bus" will only ever have one child. */
82
	device_t			 vtpci_child_dev;
83
	struct virtio_feature_desc	*vtpci_child_feat_desc;
84
85
	int				 vtpci_nvqs;
86
	struct vtpci_virtqueue		*vtpci_vqs;
87
88
	/*
89
	 * Ideally, each virtqueue that the driver provides a callback for will
90
	 * receive its own MSIX vector. If there are not sufficient vectors
91
	 * available, then attempt to have all the VQs share one vector. For
92
	 * MSIX, the configuration changed notifications must be on their own
93
	 * vector.
94
	 *
95
	 * If MSIX is not available, we will attempt to have the whole device
96
	 * share one MSI vector, and then, finally, one legacy interrupt.
97
	 */
98
	struct vtpci_interrupt		 vtpci_device_interrupt;
99
	struct vtpci_interrupt		*vtpci_msix_vq_interrupts;
100
	int				 vtpci_nmsix_resources;
101
};
102
103
static int	vtpci_probe(device_t);
104
static int	vtpci_attach(device_t);
105
static int	vtpci_detach(device_t);
106
static int	vtpci_suspend(device_t);
107
static int	vtpci_resume(device_t);
108
static int	vtpci_shutdown(device_t);
109
static void	vtpci_driver_added(device_t, driver_t *);
110
static void	vtpci_child_detached(device_t, device_t);
111
static int	vtpci_read_ivar(device_t, device_t, int, uintptr_t *);
112
static int	vtpci_write_ivar(device_t, device_t, int, uintptr_t);
113
114
static uint64_t	vtpci_negotiate_features(device_t, uint64_t);
115
static int	vtpci_with_feature(device_t, uint64_t);
116
static int	vtpci_alloc_virtqueues(device_t, int, int,
117
		    struct vq_alloc_info *);
118
static int	vtpci_setup_intr(device_t, enum intr_type);
119
static void	vtpci_stop(device_t);
120
static int	vtpci_reinit(device_t, uint64_t);
121
static void	vtpci_reinit_complete(device_t);
122
static void	vtpci_notify_virtqueue(device_t, uint16_t);
123
static uint8_t	vtpci_get_status(device_t);
124
static void	vtpci_set_status(device_t, uint8_t);
125
static void	vtpci_read_dev_config(device_t, bus_size_t, void *, int);
126
static void	vtpci_write_dev_config(device_t, bus_size_t, void *, int);
127
128
static void	vtpci_describe_features(struct vtpci_softc *, const char *,
60
		    uint64_t);
129
		    uint64_t);
61
static int	vtpci_alloc_msix(struct vtpci_common *, int);
130
static void	vtpci_probe_and_attach_child(struct vtpci_softc *);
62
static int	vtpci_alloc_msi(struct vtpci_common *);
131
63
static int	vtpci_alloc_intr_msix_pervq(struct vtpci_common *);
132
static int	vtpci_alloc_msix(struct vtpci_softc *, int);
64
static int	vtpci_alloc_intr_msix_shared(struct vtpci_common *);
133
static int	vtpci_alloc_msi(struct vtpci_softc *);
65
static int	vtpci_alloc_intr_msi(struct vtpci_common *);
134
static int	vtpci_alloc_intr_msix_pervq(struct vtpci_softc *);
66
static int	vtpci_alloc_intr_intx(struct vtpci_common *);
135
static int	vtpci_alloc_intr_msix_shared(struct vtpci_softc *);
67
static int	vtpci_alloc_interrupt(struct vtpci_common *, int, int,
136
static int	vtpci_alloc_intr_msi(struct vtpci_softc *);
137
static int	vtpci_alloc_intr_legacy(struct vtpci_softc *);
138
static int	vtpci_alloc_interrupt(struct vtpci_softc *, int, int,
68
		    struct vtpci_interrupt *);
139
		    struct vtpci_interrupt *);
69
static void	vtpci_free_interrupt(struct vtpci_common *,
140
static int	vtpci_alloc_intr_resources(struct vtpci_softc *);
70
		    struct vtpci_interrupt *);
71
141
72
static void	vtpci_free_interrupts(struct vtpci_common *);
142
static int	vtpci_setup_legacy_interrupt(struct vtpci_softc *,
73
static void	vtpci_free_virtqueues(struct vtpci_common *);
74
static void	vtpci_cleanup_setup_intr_attempt(struct vtpci_common *);
75
static int	vtpci_alloc_intr_resources(struct vtpci_common *);
76
static int	vtpci_setup_intx_interrupt(struct vtpci_common *,
77
		    enum intr_type);
143
		    enum intr_type);
78
static int	vtpci_setup_pervq_msix_interrupts(struct vtpci_common *,
144
static int	vtpci_setup_pervq_msix_interrupts(struct vtpci_softc *,
79
		    enum intr_type);
145
		    enum intr_type);
80
static int	vtpci_set_host_msix_vectors(struct vtpci_common *);
146
static int	vtpci_setup_msix_interrupts(struct vtpci_softc *,
81
static int	vtpci_setup_msix_interrupts(struct vtpci_common *,
82
		    enum intr_type);
147
		    enum intr_type);
83
static int	vtpci_setup_intrs(struct vtpci_common *, enum intr_type);
148
static int	vtpci_setup_interrupts(struct vtpci_softc *, enum intr_type);
84
static int	vtpci_reinit_virtqueue(struct vtpci_common *, int);
149
85
static void	vtpci_intx_intr(void *);
150
static int	vtpci_register_msix_vector(struct vtpci_softc *, int,
151
		    struct vtpci_interrupt *);
152
static int	vtpci_set_host_msix_vectors(struct vtpci_softc *);
153
static int	vtpci_reinit_virtqueue(struct vtpci_softc *, int);
154
155
static void	vtpci_free_interrupt(struct vtpci_softc *,
156
		    struct vtpci_interrupt *);
157
static void	vtpci_free_interrupts(struct vtpci_softc *);
158
static void	vtpci_free_virtqueues(struct vtpci_softc *);
159
static void	vtpci_release_child_resources(struct vtpci_softc *);
160
static void	vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *);
161
static void	vtpci_reset(struct vtpci_softc *);
162
163
static void	vtpci_select_virtqueue(struct vtpci_softc *, int);
164
165
static void	vtpci_legacy_intr(void *);
86
static int	vtpci_vq_shared_intr_filter(void *);
166
static int	vtpci_vq_shared_intr_filter(void *);
87
static void	vtpci_vq_shared_intr(void *);
167
static void	vtpci_vq_shared_intr(void *);
88
static int	vtpci_vq_intr_filter(void *);
168
static int	vtpci_vq_intr_filter(void *);
89
static void	vtpci_vq_intr(void *);
169
static void	vtpci_vq_intr(void *);
90
static void	vtpci_config_intr(void *);
170
static void	vtpci_config_intr(void *);
91
static void	vtpci_setup_sysctl(struct vtpci_common *);
92
171
93
#define vtpci_setup_msi_interrupt vtpci_setup_intx_interrupt
172
#define vtpci_setup_msi_interrupt vtpci_setup_legacy_interrupt
94
173
174
#define VIRTIO_PCI_CONFIG(_sc) \
175
    VIRTIO_PCI_CONFIG_OFF((((_sc)->vtpci_flags & VTPCI_FLAG_MSIX)) != 0)
176
95
/*
177
/*
96
 * This module contains two drivers:
178
 * I/O port read/write wrappers.
97
 *   - virtio_pci_legacy (vtpcil) for pre-V1 support
98
 *   - virtio_pci_modern (vtpcim) for V1 support
99
 */
179
 */
100
MODULE_VERSION(virtio_pci, 1);
180
#define vtpci_read_config_1(sc, o)	bus_read_1((sc)->vtpci_res, (o))
101
MODULE_DEPEND(virtio_pci, pci, 1, 1, 1);
181
#define vtpci_read_config_2(sc, o)	bus_read_2((sc)->vtpci_res, (o))
102
MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1);
182
#define vtpci_read_config_4(sc, o)	bus_read_4((sc)->vtpci_res, (o))
183
#define vtpci_write_config_1(sc, o, v)	bus_write_1((sc)->vtpci_res, (o), (v))
184
#define vtpci_write_config_2(sc, o, v)	bus_write_2((sc)->vtpci_res, (o), (v))
185
#define vtpci_write_config_4(sc, o, v)	bus_write_4((sc)->vtpci_res, (o), (v))
103
186
104
int vtpci_disable_msix = 0;
187
/* Tunables. */
188
static int vtpci_disable_msix = 0;
105
TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix);
189
TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix);
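The knob is read at boot; for exercising the MSI/INTx fallback paths it can be set from the loader, e.g. in loader.conf:

	hw.virtio.pci.disable_msix="1"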
106
190
107
static uint8_t
191
static device_method_t vtpci_methods[] = {
108
vtpci_read_isr(struct vtpci_common *cn)
192
	/* Device interface. */
109
{
193
	DEVMETHOD(device_probe,			  vtpci_probe),
110
	return (VIRTIO_PCI_READ_ISR(cn->vtpci_dev));
194
	DEVMETHOD(device_attach,		  vtpci_attach),
111
}
195
	DEVMETHOD(device_detach,		  vtpci_detach),
196
	DEVMETHOD(device_suspend,		  vtpci_suspend),
197
	DEVMETHOD(device_resume,		  vtpci_resume),
198
	DEVMETHOD(device_shutdown,		  vtpci_shutdown),
112
199
113
static uint16_t
200
	/* Bus interface. */
114
vtpci_get_vq_size(struct vtpci_common *cn, int idx)
201
	DEVMETHOD(bus_driver_added,		  vtpci_driver_added),
115
{
202
	DEVMETHOD(bus_child_detached,		  vtpci_child_detached),
116
	return (VIRTIO_PCI_GET_VQ_SIZE(cn->vtpci_dev, idx));
203
	DEVMETHOD(bus_read_ivar,		  vtpci_read_ivar),
117
}
204
	DEVMETHOD(bus_write_ivar,		  vtpci_write_ivar),
118
205
119
static bus_size_t
206
	/* VirtIO bus interface. */
120
vtpci_get_vq_notify_off(struct vtpci_common *cn, int idx)
207
	DEVMETHOD(virtio_bus_negotiate_features,  vtpci_negotiate_features),
121
{
208
	DEVMETHOD(virtio_bus_with_feature,	  vtpci_with_feature),
122
	return (VIRTIO_PCI_GET_VQ_NOTIFY_OFF(cn->vtpci_dev, idx));
209
	DEVMETHOD(virtio_bus_alloc_virtqueues,	  vtpci_alloc_virtqueues),
123
}
210
	DEVMETHOD(virtio_bus_setup_intr,	  vtpci_setup_intr),
211
	DEVMETHOD(virtio_bus_stop,		  vtpci_stop),
212
	DEVMETHOD(virtio_bus_reinit,		  vtpci_reinit),
213
	DEVMETHOD(virtio_bus_reinit_complete,	  vtpci_reinit_complete),
214
	DEVMETHOD(virtio_bus_notify_vq,		  vtpci_notify_virtqueue),
215
	DEVMETHOD(virtio_bus_read_device_config,  vtpci_read_dev_config),
216
	DEVMETHOD(virtio_bus_write_device_config, vtpci_write_dev_config),
124
217
125
static void
218
	DEVMETHOD_END
126
vtpci_set_vq(struct vtpci_common *cn, struct virtqueue *vq)
219
};
127
{
128
	VIRTIO_PCI_SET_VQ(cn->vtpci_dev, vq);
129
}
130
220
131
static void
221
static driver_t vtpci_driver = {
132
vtpci_disable_vq(struct vtpci_common *cn, int idx)
222
	"virtio_pci",
133
{
223
	vtpci_methods,
134
	VIRTIO_PCI_DISABLE_VQ(cn->vtpci_dev, idx);
224
	sizeof(struct vtpci_softc)
135
}
225
};
136
226
227
devclass_t vtpci_devclass;
228
229
DRIVER_MODULE(virtio_pci, pci, vtpci_driver, vtpci_devclass, 0, 0);
230
MODULE_VERSION(virtio_pci, 1);
231
MODULE_DEPEND(virtio_pci, pci, 1, 1, 1);
232
MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1);
233
137
static int
234
static int
138
vtpci_register_cfg_msix(struct vtpci_common *cn, struct vtpci_interrupt *intr)
235
vtpci_probe(device_t dev)
139
{
236
{
140
	return (VIRTIO_PCI_REGISTER_CFG_MSIX(cn->vtpci_dev, intr));
237
	char desc[36];
238
	const char *name;
239
240
	if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID)
241
		return (ENXIO);
242
243
	if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN ||
244
	    pci_get_device(dev) > VIRTIO_PCI_DEVICEID_MAX)
245
		return (ENXIO);
246
247
	if (pci_get_revid(dev) != VIRTIO_PCI_ABI_VERSION)
248
		return (ENXIO);
249
250
	name = virtio_device_name(pci_get_subdevice(dev));
251
	if (name == NULL)
252
		name = "Unknown";
253
254
	snprintf(desc, sizeof(desc), "VirtIO PCI %s adapter", name);
255
	device_set_desc_copy(dev, desc);
256
257
	return (BUS_PROBE_DEFAULT);
141
}
258
}
142
259
143
static int
260
static int
144
vtpci_register_vq_msix(struct vtpci_common *cn, int idx,
261
vtpci_attach(device_t dev)
145
    struct vtpci_interrupt *intr)
146
{
262
{
147
	return (VIRTIO_PCI_REGISTER_VQ_MSIX(cn->vtpci_dev, idx, intr));
263
	struct vtpci_softc *sc;
148
}
264
	device_t child;
265
	int rid;
149
266
150
void
267
	sc = device_get_softc(dev);
151
vtpci_init(struct vtpci_common *cn, device_t dev, bool modern)
268
	sc->vtpci_dev = dev;
152
{
153
269
154
	cn->vtpci_dev = dev;
155
156
	pci_enable_busmaster(dev);
270
	pci_enable_busmaster(dev);
157
271
158
	if (modern)
272
	rid = PCIR_BAR(0);
159
		cn->vtpci_flags |= VTPCI_FLAG_MODERN;
273
	sc->vtpci_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
274
	    RF_ACTIVE);
275
	if (sc->vtpci_res == NULL) {
276
		device_printf(dev, "cannot map I/O space\n");
277
		return (ENXIO);
278
	}
279
160
	if (pci_find_cap(dev, PCIY_MSI, NULL) != 0)
280
	if (pci_find_cap(dev, PCIY_MSI, NULL) != 0)
161
		cn->vtpci_flags |= VTPCI_FLAG_NO_MSI;
281
		sc->vtpci_flags |= VTPCI_FLAG_NO_MSI;
162
	if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0)
163
		cn->vtpci_flags |= VTPCI_FLAG_NO_MSIX;
164
282
165
	vtpci_setup_sysctl(cn);
283
	if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) {
166
}
284
		rid = PCIR_BAR(1);
285
		sc->vtpci_msix_res = bus_alloc_resource_any(dev,
286
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
287
	}
167
288
168
int
289
	if (sc->vtpci_msix_res == NULL)
169
vtpci_add_child(struct vtpci_common *cn)
290
		sc->vtpci_flags |= VTPCI_FLAG_NO_MSIX;
170
{
171
	device_t dev, child;
172
291
173
	dev = cn->vtpci_dev;
292
	vtpci_reset(sc);
174
293
175
	child = device_add_child(dev, NULL, -1);
294
	/* Tell the host we've noticed this device. */
176
	if (child == NULL) {
295
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
296
297
	if ((child = device_add_child(dev, NULL, -1)) == NULL) {
177
		device_printf(dev, "cannot create child device\n");
298
		device_printf(dev, "cannot create child device\n");
299
		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
300
		vtpci_detach(dev);
178
		return (ENOMEM);
301
		return (ENOMEM);
179
	}
302
	}
180
303
181
	cn->vtpci_child_dev = child;
304
	sc->vtpci_child_dev = child;
305
	vtpci_probe_and_attach_child(sc);
182
306
183
	return (0);
307
	return (0);
184
}
308
}
185
309
186
int
310
static int
187
vtpci_delete_child(struct vtpci_common *cn)
311
vtpci_detach(device_t dev)
188
{
312
{
189
	device_t dev, child;
313
	struct vtpci_softc *sc;
314
	device_t child;
190
	int error;
315
	int error;
191
316
192
	dev = cn->vtpci_dev;
317
	sc = device_get_softc(dev);
193
318
194
	child = cn->vtpci_child_dev;
319
	if ((child = sc->vtpci_child_dev) != NULL) {
195
	if (child != NULL) {
196
		error = device_delete_child(dev, child);
320
		error = device_delete_child(dev, child);
197
		if (error)
321
		if (error)
198
			return (error);
322
			return (error);
199
		cn->vtpci_child_dev = NULL;
323
		sc->vtpci_child_dev = NULL;
200
	}
324
	}
201
325
326
	vtpci_reset(sc);
327
328
	if (sc->vtpci_msix_res != NULL) {
329
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(1),
330
		    sc->vtpci_msix_res);
331
		sc->vtpci_msix_res = NULL;
332
	}
333
334
	if (sc->vtpci_res != NULL) {
335
		bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0),
336
		    sc->vtpci_res);
337
		sc->vtpci_res = NULL;
338
	}
339
202
	return (0);
340
	return (0);
203
}
341
}
204
342
205
void
343
static int
206
vtpci_child_detached(struct vtpci_common *cn)
344
vtpci_suspend(device_t dev)
207
{
345
{
208
346
209
	vtpci_release_child_resources(cn);
347
	return (bus_generic_suspend(dev));
210
211
	cn->vtpci_child_feat_desc = NULL;
212
	cn->vtpci_host_features = 0;
213
	cn->vtpci_features = 0;
214
}
348
}
215
349
216
int
350
static int
217
vtpci_reinit(struct vtpci_common *cn)
351
vtpci_resume(device_t dev)
218
{
352
{
219
	int idx, error;
220
353
221
	for (idx = 0; idx < cn->vtpci_nvqs; idx++) {
354
	return (bus_generic_resume(dev));
222
		error = vtpci_reinit_virtqueue(cn, idx);
355
}
223
		if (error)
224
			return (error);
225
	}
226
356
227
	if (vtpci_is_msix_enabled(cn)) {
357
static int
228
		error = vtpci_set_host_msix_vectors(cn);
358
vtpci_shutdown(device_t dev)
229
		if (error)
359
{
230
			return (error);
231
	}
232
360
361
	(void) bus_generic_shutdown(dev);
362
	/* Forcibly stop the host device. */
363
	vtpci_stop(dev);
364
233
	return (0);
365
	return (0);
234
}
366
}
235
367
236
static void
368
static void
237
vtpci_describe_features(struct vtpci_common *cn, const char *msg,
369
vtpci_driver_added(device_t dev, driver_t *driver)
238
    uint64_t features)
239
{
370
{
240
	device_t dev, child;
371
	struct vtpci_softc *sc;
241
372
242
	dev = cn->vtpci_dev;
373
	sc = device_get_softc(dev);
243
	child = cn->vtpci_child_dev;
244
374
245
	if (device_is_attached(child) || bootverbose == 0)
375
	vtpci_probe_and_attach_child(sc);
246
		return;
247
248
	virtio_describe(dev, msg, features, cn->vtpci_child_feat_desc);
249
}
376
}
250
377
251
uint64_t
378
static void
252
vtpci_negotiate_features(struct vtpci_common *cn,
379
vtpci_child_detached(device_t dev, device_t child)
253
    uint64_t child_features, uint64_t host_features)
254
{
380
{
255
	uint64_t features;
381
	struct vtpci_softc *sc;
256
382
257
	cn->vtpci_host_features = host_features;
383
	sc = device_get_softc(dev);
258
	vtpci_describe_features(cn, "host", host_features);
259
384
260
	/*
385
	vtpci_reset(sc);
261
	 * Limit negotiated features to what the driver, virtqueue, and
386
	vtpci_release_child_resources(sc);
262
	 * host all support.
263
	 */
264
	features = host_features & child_features;
265
	features = virtio_filter_transport_features(features);
266
267
	cn->vtpci_features = features;
268
	vtpci_describe_features(cn, "negotiated", features);
269
270
	return (features);
271
}
387
}
272
388
273
int
389
static int
274
vtpci_with_feature(struct vtpci_common *cn, uint64_t feature)
390
vtpci_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
275
{
391
{
276
	return ((cn->vtpci_features & feature) != 0);
392
	struct vtpci_softc *sc;
277
}
278
393
279
int
394
	sc = device_get_softc(dev);
280
vtpci_read_ivar(struct vtpci_common *cn, int index, uintptr_t *result)
281
{
282
	device_t dev;
283
	int error;
284
395
285
	dev = cn->vtpci_dev;
396
	if (sc->vtpci_child_dev != child)
286
	error = 0;
397
		return (ENOENT);
287
398
288
	switch (index) {
399
	switch (index) {
400
	case VIRTIO_IVAR_DEVTYPE:
289
	case VIRTIO_IVAR_SUBDEVICE:
401
	case VIRTIO_IVAR_SUBDEVICE:
290
		*result = pci_get_subdevice(dev);
402
		*result = pci_get_subdevice(dev);
291
		break;
403
		break;
Lines 298-371 Link Here
298
	case VIRTIO_IVAR_SUBVENDOR:
410
	case VIRTIO_IVAR_SUBVENDOR:
299
		*result = pci_get_subdevice(dev);
411
		*result = pci_get_subdevice(dev);
300
		break;
412
		break;
301
	case VIRTIO_IVAR_MODERN:
302
		*result = vtpci_is_modern(cn);
303
		break;
304
	default:
413
	default:
305
		error = ENOENT;
414
		return (ENOENT);
306
	}
415
	}
307
416
308
	return (error);
417
	return (0);
309
}
418
}
310
419
311
int
420
static int
312
vtpci_write_ivar(struct vtpci_common *cn, int index, uintptr_t value)
421
vtpci_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
313
{
422
{
314
	int error;
423
	struct vtpci_softc *sc;
315
424
316
	error = 0;
425
	sc = device_get_softc(dev);
317
426
427
	if (sc->vtpci_child_dev != child)
428
		return (ENOENT);
429
318
	switch (index) {
430
	switch (index) {
319
	case VIRTIO_IVAR_FEATURE_DESC:
431
	case VIRTIO_IVAR_FEATURE_DESC:
320
		cn->vtpci_child_feat_desc = (void *) value;
432
		sc->vtpci_child_feat_desc = (void *) value;
321
		break;
433
		break;
322
	default:
434
	default:
323
		error = ENOENT;
435
		return (ENOENT);
324
	}
436
	}
325
437
326
	return (error);
438
	return (0);
327
}
439
}
328
440
329
int
441
static uint64_t
330
vtpci_alloc_virtqueues(struct vtpci_common *cn, int flags, int nvqs,
442
vtpci_negotiate_features(device_t dev, uint64_t child_features)
331
    struct vq_alloc_info *vq_info)
332
{
443
{
333
	device_t dev;
444
	struct vtpci_softc *sc;
334
	int idx, align, error;
445
	uint64_t host_features, features;
335
446
336
	dev = cn->vtpci_dev;
447
	sc = device_get_softc(dev);
337
448
449
	host_features = vtpci_read_config_4(sc, VIRTIO_PCI_HOST_FEATURES);
450
	vtpci_describe_features(sc, "host", host_features);
451
338
	/*
452
	/*
339
	 * This is VIRTIO_PCI_VRING_ALIGN from legacy VirtIO. In modern VirtIO,
453
	 * Limit negotiated features to what the driver, virtqueue, and
340
	 * the tables do not have to be allocated contiguously, but we do so
454
	 * host all support.
341
	 * anyways.
342
	 */
455
	 */
343
	align = 4096;
456
	features = host_features & child_features;
457
	features = virtqueue_filter_features(features);
458
	sc->vtpci_features = features;
344
459
345
	if (cn->vtpci_nvqs != 0)
460
	vtpci_describe_features(sc, "negotiated", features);
461
	vtpci_write_config_4(sc, VIRTIO_PCI_GUEST_FEATURES, features);
462
463
	return (features);
464
}
465
466
static int
467
vtpci_with_feature(device_t dev, uint64_t feature)
468
{
469
	struct vtpci_softc *sc;
470
471
	sc = device_get_softc(dev);
472
473
	return ((sc->vtpci_features & feature) != 0);
474
}
475
476
static int
477
vtpci_alloc_virtqueues(device_t dev, int flags, int nvqs,
478
    struct vq_alloc_info *vq_info)
479
{
480
	struct vtpci_softc *sc;
481
	struct virtqueue *vq;
482
	struct vtpci_virtqueue *vqx;
483
	struct vq_alloc_info *info;
484
	int idx, error;
485
	uint16_t size;
486
487
	sc = device_get_softc(dev);
488
489
	if (sc->vtpci_nvqs != 0)
346
		return (EALREADY);
490
		return (EALREADY);
347
	if (nvqs <= 0)
491
	if (nvqs <= 0)
348
		return (EINVAL);
492
		return (EINVAL);
349
493
350
	cn->vtpci_vqs = malloc(nvqs * sizeof(struct vtpci_virtqueue),
494
	sc->vtpci_vqs = malloc(nvqs * sizeof(struct vtpci_virtqueue),
351
	    M_DEVBUF, M_NOWAIT | M_ZERO);
495
	    M_DEVBUF, M_NOWAIT | M_ZERO);
352
	if (cn->vtpci_vqs == NULL)
496
	if (sc->vtpci_vqs == NULL)
353
		return (ENOMEM);
497
		return (ENOMEM);
354
498
355
	for (idx = 0; idx < nvqs; idx++) {
499
	for (idx = 0; idx < nvqs; idx++) {
356
		struct vtpci_virtqueue *vqx;
500
		vqx = &sc->vtpci_vqs[idx];
357
		struct vq_alloc_info *info;
358
		struct virtqueue *vq;
359
		bus_size_t notify_offset;
360
		uint16_t size;
361
362
		vqx = &cn->vtpci_vqs[idx];
363
		info = &vq_info[idx];
501
		info = &vq_info[idx];
364
502
365
		size = vtpci_get_vq_size(cn, idx);
503
		vtpci_select_virtqueue(sc, idx);
366
		notify_offset = vtpci_get_vq_notify_off(cn, idx);
504
		size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM);
367
505
368
		error = virtqueue_alloc(dev, idx, size, notify_offset, align,
506
		error = virtqueue_alloc(dev, idx, size, VIRTIO_PCI_VRING_ALIGN,
369
		    0xFFFFFFFFUL, info, &vq);
507
		    0xFFFFFFFFUL, info, &vq);
370
		if (error) {
508
		if (error) {
371
			device_printf(dev,
509
			device_printf(dev,
Lines 373-399 Link Here
373
			break;
511
			break;
374
		}
512
		}
375
513
376
		vtpci_set_vq(cn, vq);
514
		vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
515
		    virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
377
516
378
		vqx->vtv_vq = *info->vqai_vq = vq;
517
		vqx->vtv_vq = *info->vqai_vq = vq;
379
		vqx->vtv_no_intr = info->vqai_intr == NULL;
518
		vqx->vtv_no_intr = info->vqai_intr == NULL;
380
519
381
		cn->vtpci_nvqs++;
520
		sc->vtpci_nvqs++;
382
	}
521
	}
383
522
384
	if (error)
523
	if (error)
385
		vtpci_free_virtqueues(cn);
524
		vtpci_free_virtqueues(sc);
386
525
387
	return (error);
526
	return (error);
388
}
527
}
389
528
390
static int
529
static int
391
vtpci_alloc_msix(struct vtpci_common *cn, int nvectors)
530
vtpci_setup_intr(device_t dev, enum intr_type type)
392
{
531
{
532
	struct vtpci_softc *sc;
533
	int attempt, error;
534
535
	sc = device_get_softc(dev);
536
537
	for (attempt = 0; attempt < 5; attempt++) {
538
		/*
539
		 * Start with the most desirable interrupt configuration and
540
		 * fallback towards less desirable ones.
541
		 */
542
		switch (attempt) {
543
		case 0:
544
			error = vtpci_alloc_intr_msix_pervq(sc);
545
			break;
546
		case 1:
547
			error = vtpci_alloc_intr_msix_shared(sc);
548
			break;
549
		case 2:
550
			error = vtpci_alloc_intr_msi(sc);
551
			break;
552
		case 3:
553
			error = vtpci_alloc_intr_legacy(sc);
554
			break;
555
		default:
556
			device_printf(dev,
557
			    "exhausted all interrupt allocation attempts\n");
558
			return (ENXIO);
559
		}
560
561
		if (error == 0 && vtpci_setup_interrupts(sc, type) == 0)
562
			break;
563
564
		vtpci_cleanup_setup_intr_attempt(sc);
565
	}
566
567
	if (bootverbose) {
568
		if (sc->vtpci_flags & VTPCI_FLAG_LEGACY)
569
			device_printf(dev, "using legacy interrupt\n");
570
		else if (sc->vtpci_flags & VTPCI_FLAG_MSI)
571
			device_printf(dev, "using MSI interrupt\n");
572
		else if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX)
573
			device_printf(dev, "using shared MSIX interrupts\n");
574
		else
575
			device_printf(dev, "using per VQ MSIX interrupts\n");
576
	}
577
578
	return (0);
579
}
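With bootverbose(8) enabled the selected mode is logged at attach time, e.g. (illustrative output, assuming unit 0):

	virtio_pci0: using per VQ MSIX interrupts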
580
581
static void
582
vtpci_stop(device_t dev)
583
{
584
585
	vtpci_reset(device_get_softc(dev));
586
}
587
588
static int
589
vtpci_reinit(device_t dev, uint64_t features)
590
{
591
	struct vtpci_softc *sc;
592
	int idx, error;
593
594
	sc = device_get_softc(dev);
595
596
	/*
597
	 * Redrive the device initialization. This is a bit of an abuse of
598
	 * the specification, but VirtualBox, QEMU/KVM, and BHyVe seem to
599
	 * play nice.
600
	 *
601
	 * We do not allow the host device to change from what was originally
602
	 * negotiated beyond what the guest driver changed. MSIX state should
603
	 * not change, number of virtqueues and their size remain the same, etc.
604
	 * This will need to be rethought when we want to support migration.
605
	 */
606
607
	if (vtpci_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
608
		vtpci_stop(dev);
609
610
	/*
611
	 * Quickly drive the status through ACK and DRIVER. The device
612
	 * does not become usable again until vtpci_reinit_complete().
613
	 */
614
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
615
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
616
617
	vtpci_negotiate_features(dev, features);
618
619
	for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
620
		error = vtpci_reinit_virtqueue(sc, idx);
621
		if (error)
622
			return (error);
623
	}
624
625
	if (sc->vtpci_flags & VTPCI_FLAG_MSIX) {
626
		error = vtpci_set_host_msix_vectors(sc);
627
		if (error)
628
			return (error);
629
	}
630
631
	return (0);
632
}
633
634
static void
635
vtpci_reinit_complete(device_t dev)
636
{
637
638
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
639
}
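
Taken together, vtpci_reinit() and vtpci_reinit_complete() let a child
driver redrive the handshake without tearing down its virtqueues: reset,
ACK, DRIVER, renegotiate the same features, reprogram each queue, restore
the MSI-X vector registers, and only then DRIVER_OK. From the child's
side this surfaces through the VirtIO bus methods; a sketch, with a
hypothetical child_requeue() helper standing in for driver-specific work:

static void child_requeue(device_t);	/* hypothetical */

static int
child_reinit(device_t dev, uint64_t features)
{
	int error;

	/* Walks the device back through RESET, ACK, and DRIVER. */
	error = virtio_reinit(dev, features);
	if (error)
		return (error);

	child_requeue(dev);		/* refill the existing virtqueues */

	/* Only now does the device become usable again (DRIVER_OK). */
	virtio_reinit_complete(dev);

	return (0);
}
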
640
641
static void
642
vtpci_notify_virtqueue(device_t dev, uint16_t queue)
643
{
644
	struct vtpci_softc *sc;
645
646
	sc = device_get_softc(dev);
647
648
	vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_NOTIFY, queue);
649
}
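
vtpci_notify_virtqueue() is the legacy transport's doorbell: notifying
the host is nothing more than a 16-bit write of the queue index to the
VIRTIO_PCI_QUEUE_NOTIFY register. Child drivers do not call it directly;
the usual path runs through the virtqueue layer, sketched here:

	/* After posting descriptors, a child kicks the host; this ends
	 * in the single 2-byte QUEUE_NOTIFY write above. */
	virtqueue_notify(vq);
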
650
651
static uint8_t
652
vtpci_get_status(device_t dev)
653
{
654
	struct vtpci_softc *sc;
655
656
	sc = device_get_softc(dev);
657
658
	return (vtpci_read_config_1(sc, VIRTIO_PCI_STATUS));
659
}
660
661
static void
662
vtpci_set_status(device_t dev, uint8_t status)
663
{
664
	struct vtpci_softc *sc;
665
666
	sc = device_get_softc(dev);
667
668
	if (status != VIRTIO_CONFIG_STATUS_RESET)
669
		status |= vtpci_get_status(dev);
670
671
	vtpci_write_config_1(sc, VIRTIO_PCI_STATUS, status);
672
}
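
Because vtpci_set_status() ORs each request into the device's current
status (a reset, being zero, is the one write that clears), the handshake
bits only ever accumulate. A worked example of the register across
bring-up, using the virtio_config.h values:

	uint8_t status = VIRTIO_CONFIG_STATUS_RESET;	/* 0x00: cleared by reset */
	status |= VIRTIO_CONFIG_STATUS_ACK;		/* 0x01: guest saw device */
	status |= VIRTIO_CONFIG_STATUS_DRIVER;		/* 0x03: driver bound     */
	status |= VIRTIO_CONFIG_STATUS_DRIVER_OK;	/* 0x07: driver fully up  */
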
673
674
static void
675
vtpci_read_dev_config(device_t dev, bus_size_t offset,
676
    void *dst, int length)
677
{
678
	struct vtpci_softc *sc;
679
	bus_size_t off;
680
	uint8_t *d;
681
	int size;
682
683
	sc = device_get_softc(dev);
684
	off = VIRTIO_PCI_CONFIG(sc) + offset;
685
686
	for (d = dst; length > 0; d += size, off += size, length -= size) {
687
		if (length >= 4) {
688
			size = 4;
689
			*(uint32_t *)d = vtpci_read_config_4(sc, off);
690
		} else if (length >= 2) {
691
			size = 2;
692
			*(uint16_t *)d = vtpci_read_config_2(sc, off);
693
		} else {
694
			size = 1;
695
			*d = vtpci_read_config_1(sc, off);
696
		}
697
	}
698
}
699
700
static void
701
vtpci_write_dev_config(device_t dev, bus_size_t offset,
702
    void *src, int length)
703
{
704
	struct vtpci_softc *sc;
705
	bus_size_t off;
706
	uint8_t *s;
707
	int size;
708
709
	sc = device_get_softc(dev);
710
	off = VIRTIO_PCI_CONFIG(sc) + offset;
711
712
	for (s = src; length > 0; s += size, off += size, length -= size) {
713
		if (length >= 4) {
714
			size = 4;
715
			vtpci_write_config_4(sc, off, *(uint32_t *)s);
716
		} else if (length >= 2) {
717
			size = 2;
718
			vtpci_write_config_2(sc, off, *(uint16_t *)s);
719
		} else {
720
			size = 1;
721
			vtpci_write_config_1(sc, off, *s);
722
		}
723
	}
724
}
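
Both device-config accessors split an arbitrary-length access into the
widest chunk still available (4, then 2, then 1 bytes), offset from
VIRTIO_PCI_CONFIG(sc), the start of the device-specific window. Children
reach them through the bus method; a sketch (the balloon config struct
is just an illustrative target):

	uint32_t num_pages;

	/* A 4-byte field arrives via a single vtpci_read_config_4(). */
	virtio_read_device_config(dev,
	    offsetof(struct virtio_balloon_config, num_pages),
	    &num_pages, sizeof(num_pages));
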
725
726
static void
727
vtpci_describe_features(struct vtpci_softc *sc, const char *msg,
728
    uint64_t features)
729
{
730
	device_t dev, child;
731
732
	dev = sc->vtpci_dev;
733
	child = sc->vtpci_child_dev;
734
735
	if (device_is_attached(child) || bootverbose == 0)
736
		return;
737
738
	virtio_describe(dev, msg, features, sc->vtpci_child_feat_desc);
739
}
740
741
static void
742
vtpci_probe_and_attach_child(struct vtpci_softc *sc)
743
{
744
	device_t dev, child;
745
746
	dev = sc->vtpci_dev;
747
	child = sc->vtpci_child_dev;
748
749
	if (child == NULL)
750
		return;
751
752
	if (device_get_state(child) != DS_NOTPRESENT)
753
		return;
754
755
	if (device_probe(child) != 0)
756
		return;
757
758
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
759
	if (device_attach(child) != 0) {
760
		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
761
		vtpci_reset(sc);
762
		vtpci_release_child_resources(sc);
763
		/* Reset status for future attempt. */
764
		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
765
	} else {
766
		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
767
		VIRTIO_ATTACH_COMPLETED(child);
768
	}
769
}
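
The attach path above implements the status handshake the specification
expects from a transport, including the failure protocol:

/*
 * Status transitions driven by vtpci_probe_and_attach_child():
 *
 *   probe + attach succeed:  DRIVER is set before attach, DRIVER_OK
 *       after it, and the child's VIRTIO_ATTACH_COMPLETED() method
 *       then runs.
 *   attach fails:  FAILED is latched so the host can see it, the
 *       device is reset, the child's resources are released, and the
 *       status is brought back to ACK so a later attach can retry.
 */
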
770
771
static int
772
vtpci_alloc_msix(struct vtpci_softc *sc, int nvectors)
773
{
393
	device_t dev;
774
	device_t dev;
394
	int nmsix, cnt, required;
775
	int nmsix, cnt, required;
395
776
396
	dev = cn->vtpci_dev;
777
	dev = sc->vtpci_dev;
397
778
398
	/* Allocate an additional vector for the config changes. */
779
	/* Allocate an additional vector for the config changes. */
399
	required = nvectors + 1;
780
	required = nvectors + 1;
Lines 404-410 Link Here
404
785
405
	cnt = required;
786
	cnt = required;
406
	if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
787
	if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
407
		cn->vtpci_nmsix_resources = required;
788
		sc->vtpci_nmsix_resources = required;
408
		return (0);
789
		return (0);
409
	}
790
	}
410
791
Lines 414-425 Link Here
414
}
795
}
415
796
416
static int
797
static int
417
vtpci_alloc_msi(struct vtpci_common *cn)
798
vtpci_alloc_msi(struct vtpci_softc *sc)
418
{
799
{
419
	device_t dev;
800
	device_t dev;
420
	int nmsi, cnt, required;
801
	int nmsi, cnt, required;
421
802
422
	dev = cn->vtpci_dev;
803
	dev = sc->vtpci_dev;
423
	required = 1;
804
	required = 1;
424
805
425
	nmsi = pci_msi_count(dev);
806
	nmsi = pci_msi_count(dev);
Lines 436-513 Link Here
436
}
817
}
437
818
438
static int
819
static int
439
vtpci_alloc_intr_msix_pervq(struct vtpci_common *cn)
820
vtpci_alloc_intr_msix_pervq(struct vtpci_softc *sc)
440
{
821
{
441
	int i, nvectors, error;
822
	int i, nvectors, error;
442
823
443
	if (vtpci_disable_msix != 0 || cn->vtpci_flags & VTPCI_FLAG_NO_MSIX)
824
	if (vtpci_disable_msix != 0 ||
825
	    sc->vtpci_flags & VTPCI_FLAG_NO_MSIX)
444
		return (ENOTSUP);
826
		return (ENOTSUP);
445
827
446
	for (nvectors = 0, i = 0; i < cn->vtpci_nvqs; i++) {
828
	for (nvectors = 0, i = 0; i < sc->vtpci_nvqs; i++) {
447
		if (cn->vtpci_vqs[i].vtv_no_intr == 0)
829
		if (sc->vtpci_vqs[i].vtv_no_intr == 0)
448
			nvectors++;
830
			nvectors++;
449
	}
831
	}
450
832
451
	error = vtpci_alloc_msix(cn, nvectors);
833
	error = vtpci_alloc_msix(sc, nvectors);
452
	if (error)
834
	if (error)
453
		return (error);
835
		return (error);
454
836
455
	cn->vtpci_flags |= VTPCI_FLAG_MSIX;
837
	sc->vtpci_flags |= VTPCI_FLAG_MSIX;
456
838
457
	return (0);
839
	return (0);
458
}
840
}
459
841
460
static int
842
static int
461
vtpci_alloc_intr_msix_shared(struct vtpci_common *cn)
843
vtpci_alloc_intr_msix_shared(struct vtpci_softc *sc)
462
{
844
{
463
	int error;
845
	int error;
464
846
465
	if (vtpci_disable_msix != 0 || cn->vtpci_flags & VTPCI_FLAG_NO_MSIX)
847
	if (vtpci_disable_msix != 0 ||
848
	    sc->vtpci_flags & VTPCI_FLAG_NO_MSIX)
466
		return (ENOTSUP);
849
		return (ENOTSUP);
467
850
468
	error = vtpci_alloc_msix(cn, 1);
851
	error = vtpci_alloc_msix(sc, 1);
469
	if (error)
852
	if (error)
470
		return (error);
853
		return (error);
471
854
472
	cn->vtpci_flags |= VTPCI_FLAG_MSIX | VTPCI_FLAG_SHARED_MSIX;
855
	sc->vtpci_flags |= VTPCI_FLAG_MSIX | VTPCI_FLAG_SHARED_MSIX;
473
856
474
	return (0);
857
	return (0);
475
}
858
}
476
859
477
static int
860
static int
478
vtpci_alloc_intr_msi(struct vtpci_common *cn)
861
vtpci_alloc_intr_msi(struct vtpci_softc *sc)
479
{
862
{
480
	int error;
863
	int error;
481
864
482
	/* Only bhyve supports MSI. */
865
	/* Only bhyve supports MSI. */
483
	if (cn->vtpci_flags & VTPCI_FLAG_NO_MSI)
866
	if (sc->vtpci_flags & VTPCI_FLAG_NO_MSI)
484
		return (ENOTSUP);
867
		return (ENOTSUP);
485
868
486
	error = vtpci_alloc_msi(cn);
869
	error = vtpci_alloc_msi(sc);
487
	if (error)
870
	if (error)
488
		return (error);
871
		return (error);
489
872
490
	cn->vtpci_flags |= VTPCI_FLAG_MSI;
873
	sc->vtpci_flags |= VTPCI_FLAG_MSI;
491
874
492
	return (0);
875
	return (0);
493
}
876
}
494
877
495
static int
878
static int
496
vtpci_alloc_intr_intx(struct vtpci_common *cn)
879
vtpci_alloc_intr_legacy(struct vtpci_softc *sc)
497
{
880
{
498
881
499
	cn->vtpci_flags |= VTPCI_FLAG_INTX;
882
	sc->vtpci_flags |= VTPCI_FLAG_LEGACY;
500
883
501
	return (0);
884
	return (0);
502
}
885
}
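
Whichever name it carries on each side of this refactor, the INTx path
is the unconditional last resort: it merely records a flag and cannot
fail. The MSI-X paths can also be vetoed through the vtpci_disable_msix
knob they check; in the driver that knob is a loader tunable, along
these lines:

/* Lets an administrator force the MSI/INTx fallbacks for debugging. */
static int vtpci_disable_msix = 0;
TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix);
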
503
886
504
static int
887
static int
505
vtpci_alloc_interrupt(struct vtpci_common *cn, int rid, int flags,
888
vtpci_alloc_interrupt(struct vtpci_softc *sc, int rid, int flags,
506
    struct vtpci_interrupt *intr)
889
    struct vtpci_interrupt *intr)
507
{
890
{
508
	struct resource *irq;
891
	struct resource *irq;
509
892
510
	irq = bus_alloc_resource_any(cn->vtpci_dev, SYS_RES_IRQ, &rid, flags);
893
	irq = bus_alloc_resource_any(sc->vtpci_dev, SYS_RES_IRQ, &rid, flags);
511
	if (irq == NULL)
894
	if (irq == NULL)
512
		return (ENXIO);
895
		return (ENXIO);
513
896
Lines 517-652 Link Here
517
	return (0);
900
	return (0);
518
}
901
}
519
902
520
static void
521
vtpci_free_interrupt(struct vtpci_common *cn, struct vtpci_interrupt *intr)
522
{
523
	device_t dev;
524
525
	dev = cn->vtpci_dev;
526
527
	if (intr->vti_handler != NULL) {
528
		bus_teardown_intr(dev, intr->vti_irq, intr->vti_handler);
529
		intr->vti_handler = NULL;
530
	}
531
532
	if (intr->vti_irq != NULL) {
533
		bus_release_resource(dev, SYS_RES_IRQ, intr->vti_rid,
534
		    intr->vti_irq);
535
		intr->vti_irq = NULL;
536
		intr->vti_rid = -1;
537
	}
538
}
539
540
static void
541
vtpci_free_interrupts(struct vtpci_common *cn)
542
{
543
	struct vtpci_interrupt *intr;
544
	int i, nvq_intrs;
545
546
	vtpci_free_interrupt(cn, &cn->vtpci_device_interrupt);
547
548
	if (cn->vtpci_nmsix_resources != 0) {
549
		nvq_intrs = cn->vtpci_nmsix_resources - 1;
550
		cn->vtpci_nmsix_resources = 0;
551
552
		if ((intr = cn->vtpci_msix_vq_interrupts) != NULL) {
553
			for (i = 0; i < nvq_intrs; i++, intr++)
554
				vtpci_free_interrupt(cn, intr);
555
556
			free(cn->vtpci_msix_vq_interrupts, M_DEVBUF);
557
			cn->vtpci_msix_vq_interrupts = NULL;
558
		}
559
	}
560
561
	if (cn->vtpci_flags & (VTPCI_FLAG_MSI | VTPCI_FLAG_MSIX))
562
		pci_release_msi(cn->vtpci_dev);
563
564
	cn->vtpci_flags &= ~VTPCI_FLAG_ITYPE_MASK;
565
}
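
Interrupt teardown mirrors setup in reverse, which is what keeps these
routines safe on partially initialized state:

/*
 * vtpci_free_interrupt(): detach the handler (bus_teardown_intr())
 * before releasing the SYS_RES_IRQ resource (bus_release_resource()).
 * vtpci_free_interrupts(): free the config-change vector, then each
 * per-VQ vector, hand the MSI/MSI-X vectors back via pci_release_msi(),
 * and clear the interrupt-type flags so a later setup attempt starts
 * from a clean slate.
 */
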
566
567
static void
568
vtpci_free_virtqueues(struct vtpci_common *cn)
569
{