Lines 1-4
 /*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
  * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
  * All rights reserved.
  *

Lines 67-88
 #include <netinet6/ip6_var.h>
 #include <netinet/udp.h>
 #include <netinet/tcp.h>
+#include <netinet/tcp_lro.h>
 
 #include <machine/bus.h>
 #include <machine/resource.h>
 #include <sys/bus.h>
 #include <sys/rman.h>
 
+#include <sys/queue.h>
+
 #include <dev/virtio/virtio.h>
 #include <dev/virtio/virtqueue.h>
 #include <dev/virtio/network/virtio_net.h>
 #include <dev/virtio/network/if_vtnetvar.h>
-
 #include "virtio_if.h"
 
 #include "opt_inet.h"
 #include "opt_inet6.h"
 
+#if defined(INET) || defined(INET6)
+#include <machine/in_cksum.h>
+#endif
+
 static int vtnet_modevent(module_t, int, void *);
 
 static int vtnet_probe(device_t);

Lines 94-101
 static int vtnet_attach_completed(device_t);
 static int vtnet_config_change(device_t);
 
-static void vtnet_negotiate_features(struct vtnet_softc *);
-static void vtnet_setup_features(struct vtnet_softc *);
+static int vtnet_negotiate_features(struct vtnet_softc *);
+static int vtnet_setup_features(struct vtnet_softc *);
 static int vtnet_init_rxq(struct vtnet_softc *, int);
 static int vtnet_init_txq(struct vtnet_softc *, int);
 static int vtnet_alloc_rxtx_queues(struct vtnet_softc *);

Lines 103-110
 static int vtnet_alloc_rx_filters(struct vtnet_softc *);
 static void vtnet_free_rx_filters(struct vtnet_softc *);
 static int vtnet_alloc_virtqueues(struct vtnet_softc *);
+static int vtnet_alloc_interface(struct vtnet_softc *);
 static int vtnet_setup_interface(struct vtnet_softc *);
-static int vtnet_change_mtu(struct vtnet_softc *, int);
+static int vtnet_ioctl_mtu(struct vtnet_softc *, int);
+static int vtnet_ioctl_ifflags(struct vtnet_softc *);
+static int vtnet_ioctl_multi(struct vtnet_softc *);
+static int vtnet_ioctl_ifcap(struct vtnet_softc *, struct ifreq *);
 static int vtnet_ioctl(struct ifnet *, u_long, caddr_t);
 static uint64_t vtnet_get_counter(struct ifnet *, ift_counter);
 

Lines 112-122
 static void vtnet_rxq_free_mbufs(struct vtnet_rxq *);
 static struct mbuf *
     vtnet_rx_alloc_buf(struct vtnet_softc *, int , struct mbuf **);
-static int vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *,
+static int vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *,
     struct mbuf *, int);
 static int vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
 static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
 static int vtnet_rxq_new_buf(struct vtnet_rxq *);
+static int vtnet_rxq_csum_needs_csum(struct vtnet_rxq *, struct mbuf *,
+    uint16_t, int, struct virtio_net_hdr *);
+static int vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
+    uint16_t, int, struct virtio_net_hdr *);
 static int vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
     struct virtio_net_hdr *);
 static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);

Lines 128-133
 static void vtnet_rx_vq_intr(void *);
 static void vtnet_rxq_tq_intr(void *, int);
 
+static int vtnet_txq_intr_threshold(struct vtnet_txq *);
 static int vtnet_txq_below_threshold(struct vtnet_txq *);
 static int vtnet_txq_notify(struct vtnet_txq *);
 static void vtnet_txq_free_mbufs(struct vtnet_txq *);

Lines 140-146
     struct virtio_net_hdr *);
 static int vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **,
     struct vtnet_tx_header *);
-static int vtnet_txq_encap(struct vtnet_txq *, struct mbuf **);
+static int vtnet_txq_encap(struct vtnet_txq *, struct mbuf **, int);
 #ifdef VTNET_LEGACY_TX
 static void vtnet_start_locked(struct vtnet_txq *, struct ifnet *);
 static void vtnet_start(struct ifnet *);

Lines 177-182
 static int vtnet_init_tx_queues(struct vtnet_softc *);
 static int vtnet_init_rxtx_queues(struct vtnet_softc *);
 static void vtnet_set_active_vq_pairs(struct vtnet_softc *);
+static void vtnet_update_rx_offloads(struct vtnet_softc *);
 static int vtnet_reinit(struct vtnet_softc *);
 static void vtnet_init_locked(struct vtnet_softc *);
 static void vtnet_init(void *);

Lines 185-195
 static void vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
     struct sglist *, int, int);
 static int vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
+static int vtnet_ctrl_guest_offloads(struct vtnet_softc *, uint64_t);
 static int vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
-static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
+static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, uint8_t, int);
 static int vtnet_set_promisc(struct vtnet_softc *, int);
 static int vtnet_set_allmulti(struct vtnet_softc *, int);
-static void vtnet_attach_disable_promisc(struct vtnet_softc *);
 static void vtnet_rx_filter(struct vtnet_softc *);
 static void vtnet_rx_filter_mac(struct vtnet_softc *);
 static int vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);

Lines 198-218
 static void vtnet_register_vlan(void *, struct ifnet *, uint16_t);
 static void vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);
 
+static void vtnet_update_speed_duplex(struct vtnet_softc *);
 static int vtnet_is_link_up(struct vtnet_softc *);
 static void vtnet_update_link_status(struct vtnet_softc *);
 static int vtnet_ifmedia_upd(struct ifnet *);
 static void vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
-static void vtnet_get_hwaddr(struct vtnet_softc *);
-static void vtnet_set_hwaddr(struct vtnet_softc *);
+static void vtnet_get_macaddr(struct vtnet_softc *);
+static void vtnet_set_macaddr(struct vtnet_softc *);
+static void vtnet_attached_set_macaddr(struct vtnet_softc *);
 static void vtnet_vlan_tag_remove(struct mbuf *);
 static void vtnet_set_rx_process_limit(struct vtnet_softc *);
-static void vtnet_set_tx_intr_threshold(struct vtnet_softc *);
 
 static void vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
     struct sysctl_oid_list *, struct vtnet_rxq *);
 static void vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
     struct sysctl_oid_list *, struct vtnet_txq *);
 static void vtnet_setup_queue_sysctl(struct vtnet_softc *);
+static void vtnet_load_tunables(struct vtnet_softc *);
 static void vtnet_setup_sysctl(struct vtnet_softc *);
 
 static int vtnet_rxq_enable_intr(struct vtnet_rxq *);

Lines 229-285
 static int vtnet_tunable_int(struct vtnet_softc *, const char *, int);
 
 /* Tunables. */
-static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VNET driver parameters");
+#define vtnet_htog16(_sc, _val) virtio_htog16(vtnet_modern(_sc), _val)
+#define vtnet_htog32(_sc, _val) virtio_htog32(vtnet_modern(_sc), _val)
+#define vtnet_htog64(_sc, _val) virtio_htog64(vtnet_modern(_sc), _val)
+#define vtnet_gtoh16(_sc, _val) virtio_gtoh16(vtnet_modern(_sc), _val)
+#define vtnet_gtoh32(_sc, _val) virtio_gtoh32(vtnet_modern(_sc), _val)
+#define vtnet_gtoh64(_sc, _val) virtio_gtoh64(vtnet_modern(_sc), _val)
+
+static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VirtIO Net driver");
+
 static int vtnet_csum_disable = 0;
-TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
 SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
     &vtnet_csum_disable, 0, "Disables receive and send checksum offload");
+
+static int vtnet_fixup_needs_csum = 0;
+SYSCTL_INT(_hw_vtnet, OID_AUTO, fixup_needs_csum, CTLFLAG_RDTUN,
+    &vtnet_fixup_needs_csum, 0,
+    "Calculate valid checksum for NEEDS_CSUM packets");
+
 static int vtnet_tso_disable = 0;
-TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
-SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN, &vtnet_tso_disable,
-    0, "Disables TCP Segmentation Offload");
+SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN,
+    &vtnet_tso_disable, 0, "Disables TSO");
+
 static int vtnet_lro_disable = 0;
-TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
-SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN, &vtnet_lro_disable,
-    0, "Disables TCP Large Receive Offload");
+SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN,
+    &vtnet_lro_disable, 0, "Disables hardware LRO");
+
 static int vtnet_mq_disable = 0;
-TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable);
-SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN, &vtnet_mq_disable,
-    0, "Disables Multi Queue support");
+SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN,
+    &vtnet_mq_disable, 0, "Disables multiqueue support");
+
 static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
-TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs);
 SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
-    &vtnet_mq_max_pairs, 0, "Sets the maximum number of Multi Queue pairs");
-static int vtnet_rx_process_limit = 512;
-TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit);
+    &vtnet_mq_max_pairs, 0, "Maximum number of multiqueue pairs");
+
+static int vtnet_tso_maxlen = IP_MAXPACKET;
+SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
+    &vtnet_tso_maxlen, 0, "TSO burst limit");
+
+static int vtnet_rx_process_limit = 1024;
 SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
     &vtnet_rx_process_limit, 0,
-    "Limits the number RX segments processed in a single pass");
+    "Number of RX segments processed in one pass");
 
+static int vtnet_lro_entry_count = 128;
+SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
+    &vtnet_lro_entry_count, 0, "Software LRO entry count");
+
+/* Enable sorted LRO, and the depth of the mbuf queue. */
+static int vtnet_lro_mbufq_depth = 0;
+SYSCTL_UINT(_hw_vtnet, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN,
+    &vtnet_lro_mbufq_depth, 0, "Depth of software LRO mbuf queue");
+
 static uma_zone_t vtnet_tx_header_zone;
 
 static struct virtio_feature_desc vtnet_feature_desc[] = {
     { VIRTIO_NET_F_CSUM, "TxChecksum" },
     { VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" },
-    { VIRTIO_NET_F_MAC, "MacAddress" },
-    { VIRTIO_NET_F_GSO, "TxAllGSO" },
-    { VIRTIO_NET_F_GUEST_TSO4, "RxTSOv4" },
-    { VIRTIO_NET_F_GUEST_TSO6, "RxTSOv6" },
-    { VIRTIO_NET_F_GUEST_ECN, "RxECN" },
-    { VIRTIO_NET_F_GUEST_UFO, "RxUFO" },
-    { VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" },
-    { VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" },
-    { VIRTIO_NET_F_HOST_ECN, "TxTSOECN" },
-    { VIRTIO_NET_F_HOST_UFO, "TxUFO" },
-    { VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" },
-    { VIRTIO_NET_F_STATUS, "Status" },
-    { VIRTIO_NET_F_CTRL_VQ, "ControlVq" },
-    { VIRTIO_NET_F_CTRL_RX, "RxMode" },
-    { VIRTIO_NET_F_CTRL_VLAN, "VLanFilter" },
-    { VIRTIO_NET_F_CTRL_RX_EXTRA, "RxModeExtra" },
-    { VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" },
-    { VIRTIO_NET_F_MQ, "Multiqueue" },
-    { VIRTIO_NET_F_CTRL_MAC_ADDR, "SetMacAddress" },
+    { VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "CtrlRxOffloads" },
+    { VIRTIO_NET_F_MAC, "MAC" },
+    { VIRTIO_NET_F_GSO, "TxGSO" },
+    { VIRTIO_NET_F_GUEST_TSO4, "RxLROv4" },
+    { VIRTIO_NET_F_GUEST_TSO6, "RxLROv6" },
+    { VIRTIO_NET_F_GUEST_ECN, "RxLROECN" },
+    { VIRTIO_NET_F_GUEST_UFO, "RxUFO" },
+    { VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" },
+    { VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" },
+    { VIRTIO_NET_F_HOST_ECN, "TxTSOECN" },
+    { VIRTIO_NET_F_HOST_UFO, "TxUFO" },
+    { VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" },
+    { VIRTIO_NET_F_STATUS, "Status" },
+    { VIRTIO_NET_F_CTRL_VQ, "CtrlVq" },
+    { VIRTIO_NET_F_CTRL_RX, "CtrlRxMode" },
+    { VIRTIO_NET_F_CTRL_VLAN, "CtrlVLANFilter" },
+    { VIRTIO_NET_F_CTRL_RX_EXTRA, "CtrlRxModeExtra" },
+    { VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" },
+    { VIRTIO_NET_F_MQ, "Multiqueue" },
+    { VIRTIO_NET_F_CTRL_MAC_ADDR, "CtrlMacAddr" },
+    { VIRTIO_NET_F_SPEED_DUPLEX, "SpeedDuplex" },
 
     { 0, NULL }
 };

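Note that SYSCTLs declared with CTLFLAG_RDTUN are also registered as loader tunables, which is why the separate TUNABLE_INT() lines can be dropped. For reference, a minimal usage sketch of the new byte-order wrappers follows; it is illustrative only and not part of the patch (example_set_num_buffers is a hypothetical helper; vtnet_modern() and struct virtio_net_hdr_mrg_rxbuf come from the patched headers):

/*
 * Modern (VirtIO 1.0) devices use little-endian header fields while legacy
 * devices use guest-native byte order; vtnet_gtoh16() hides the difference.
 */
static inline void
example_set_num_buffers(struct vtnet_softc *sc,
    struct virtio_net_hdr_mrg_rxbuf *mhdr, uint16_t nbufs)
{
    mhdr->num_buffers = vtnet_gtoh16(sc, nbufs);
}
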
Lines 305-320
 #endif /* DEV_NETMAP */
 
 static driver_t vtnet_driver = {
-    "vtnet",
-    vtnet_methods,
-    sizeof(struct vtnet_softc)
+    .name = "vtnet",
+    .methods = vtnet_methods,
+    .size = sizeof(struct vtnet_softc)
 };
 static devclass_t vtnet_devclass;
 
 DRIVER_MODULE(vtnet, virtio_mmio, vtnet_driver, vtnet_devclass,
     vtnet_modevent, 0);
-DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
-    vtnet_modevent, 0);
+DRIVER_MODULE(vtnet, vtpcil, vtnet_driver, vtnet_devclass, vtnet_modevent, 0);
+DRIVER_MODULE(vtnet, vtpcim, vtnet_driver, vtnet_devclass, vtnet_modevent, 0);
 MODULE_VERSION(vtnet, 1);
 MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
 #ifdef DEV_NETMAP

Lines 361-367
     if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
         return (ENXIO);
 
-    device_set_desc(dev, "VirtIO Networking Adapter");
+    device_set_desc(dev, "VirtIO Network Adapter");
 
     return (BUS_PROBE_DEFAULT);
 }

Lines 380-389
 
     VTNET_CORE_LOCK_INIT(sc);
     callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
+    vtnet_load_tunables(sc);
 
+    error = vtnet_alloc_interface(sc);
+    if (error) {
+        device_printf(dev, "cannot allocate interface\n");
+        goto fail;
+    }
+
     vtnet_setup_sysctl(sc);
-    vtnet_setup_features(sc);
 
+    error = vtnet_setup_features(sc);
+    if (error) {
+        device_printf(dev, "cannot setup features\n");
+        goto fail;
+    }
+
     error = vtnet_alloc_rx_filters(sc);
     if (error) {
         device_printf(dev, "cannot allocate Rx filters\n");

Lines 410-416
 
     error = virtio_setup_intr(dev, INTR_TYPE_NET);
     if (error) {
-        device_printf(dev, "cannot setup virtqueue interrupts\n");
+        device_printf(dev, "cannot setup interrupts\n");
         /* BMV: This will crash if during boot! */
         ether_ifdetach(sc->vtnet_ifp);
         goto fail;

Lines 518-524
 static int
 vtnet_shutdown(device_t dev)
 {
-
     /*
      * Suspend already does all of what we need to
      * do here; we just never expect to be resumed.

Lines 529-537
 static int
 vtnet_attach_completed(device_t dev)
 {
+    struct vtnet_softc *sc;
 
-    vtnet_attach_disable_promisc(device_get_softc(dev));
+    sc = device_get_softc(dev);
 
+    VTNET_CORE_LOCK(sc);
+    vtnet_attached_set_macaddr(sc);
+    VTNET_CORE_UNLOCK(sc);
+
     return (0);
 }
 

Lines 551-587
     return (0);
 }
 
-static void
+static int
 vtnet_negotiate_features(struct vtnet_softc *sc)
 {
     device_t dev;
-    uint64_t mask, features;
+    uint64_t features, negotiated_features;
+    int no_csum;
 
     dev = sc->vtnet_dev;
-    mask = 0;
+    features = virtio_bus_is_modern(dev) ? VTNET_MODERN_FEATURES :
+        VTNET_LEGACY_FEATURES;
 
     /*
      * TSO and LRO are only available when their corresponding checksum
      * offload feature is also negotiated.
      */
-    if (vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable)) {
-        mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
-        mask |= VTNET_TSO_FEATURES | VTNET_LRO_FEATURES;
-    }
-    if (vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
-        mask |= VTNET_TSO_FEATURES;
-    if (vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
-        mask |= VTNET_LRO_FEATURES;
+    no_csum = vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable);
+    if (no_csum)
+        features &= ~(VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM);
+    if (no_csum || vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
+        features &= ~VTNET_TSO_FEATURES;
+    if (no_csum || vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
+        features &= ~VTNET_LRO_FEATURES;
+
 #ifndef VTNET_LEGACY_TX
     if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
-        mask |= VIRTIO_NET_F_MQ;
+        features &= ~VIRTIO_NET_F_MQ;
 #else
-    mask |= VIRTIO_NET_F_MQ;
+    features &= ~VIRTIO_NET_F_MQ;
 #endif
 
-    features = VTNET_FEATURES & ~mask;
-    sc->vtnet_features = virtio_negotiate_features(dev, features);
+    negotiated_features = virtio_negotiate_features(dev, features);
 
+    if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
+        uint16_t mtu;
+
+        mtu = virtio_read_dev_config_2(dev,
+            offsetof(struct virtio_net_config, mtu));
+        if (mtu < VTNET_MIN_MTU /* || mtu > VTNET_MAX_MTU */) {
+            device_printf(dev, "Invalid MTU value: %d. "
+                "MTU feature disabled.\n", mtu);
+            features &= ~VIRTIO_NET_F_MTU;
+            negotiated_features =
+                virtio_negotiate_features(dev, features);
+        }
+    }
+
+    if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
+        uint16_t npairs;
+
+        npairs = virtio_read_dev_config_2(dev,
+            offsetof(struct virtio_net_config, max_virtqueue_pairs));
+        if (npairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
+            npairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
+            device_printf(dev, "Invalid max_virtqueue_pairs value: "
+                "%d. Multiqueue feature disabled.\n", npairs);
+            features &= ~VIRTIO_NET_F_MQ;
+            negotiated_features =
+                virtio_negotiate_features(dev, features);
+        }
+    }
+
     if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
         virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
         /*

Link Here
|
595 |
*/ |
688 |
*/ |
596 |
if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) { |
689 |
if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) { |
597 |
device_printf(dev, |
690 |
device_printf(dev, |
598 |
"LRO disabled due to both mergeable buffers and " |
691 |
"Host LRO disabled since both mergeable buffers " |
599 |
"indirect descriptors not negotiated\n"); |
692 |
"and indirect descriptors were not negotiated\n"); |
600 |
|
|
|
601 |
features &= ~VTNET_LRO_FEATURES; |
693 |
features &= ~VTNET_LRO_FEATURES; |
602 |
sc->vtnet_features = |
694 |
negotiated_features = |
603 |
virtio_negotiate_features(dev, features); |
695 |
virtio_negotiate_features(dev, features); |
604 |
} else |
696 |
} else |
605 |
sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG; |
697 |
sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG; |
606 |
} |
698 |
} |
|
|
699 |
|
700 |
sc->vtnet_features = negotiated_features; |
701 |
sc->vtnet_negotiated_features = negotiated_features; |
702 |
|
703 |
return (virtio_finalize_features(dev)); |
607 |
} |
704 |
} |
608 |
|
705 |
|
609 |
static void |
706 |
static int |
610 |
vtnet_setup_features(struct vtnet_softc *sc) |
707 |
vtnet_setup_features(struct vtnet_softc *sc) |
611 |
{ |
708 |
{ |
612 |
device_t dev; |
709 |
device_t dev; |
|
|
710 |
int error; |
613 |
|
711 |
|
614 |
dev = sc->vtnet_dev; |
712 |
dev = sc->vtnet_dev; |
615 |
|
713 |
|
616 |
vtnet_negotiate_features(sc); |
714 |
error = vtnet_negotiate_features(sc); |
|
|
715 |
if (error) |
716 |
return (error); |
617 |
|
717 |
|
|
|
718 |
if (virtio_with_feature(dev, VIRTIO_F_VERSION_1)) |
719 |
sc->vtnet_flags |= VTNET_FLAG_MODERN; |
618 |
if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) |
720 |
if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) |
619 |
sc->vtnet_flags |= VTNET_FLAG_INDIRECT; |
721 |
sc->vtnet_flags |= VTNET_FLAG_INDIRECT; |
620 |
if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX)) |
722 |
if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX)) |
Lines 625-650
Link Here
|
625 |
sc->vtnet_flags |= VTNET_FLAG_MAC; |
727 |
sc->vtnet_flags |= VTNET_FLAG_MAC; |
626 |
} |
728 |
} |
627 |
|
729 |
|
|
|
730 |
if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) { |
731 |
sc->vtnet_max_mtu = virtio_read_dev_config_2(dev, |
732 |
offsetof(struct virtio_net_config, mtu)); |
733 |
} else |
734 |
sc->vtnet_max_mtu = VTNET_MAX_MTU; |
735 |
|
628 |
if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) { |
736 |
if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) { |
629 |
sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS; |
737 |
sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS; |
630 |
sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
738 |
sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
|
|
739 |
} else if (vtnet_modern(sc)) { |
740 |
/* This is identical to the mergeable header. */ |
741 |
sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_v1); |
631 |
} else |
742 |
} else |
632 |
sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr); |
743 |
sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr); |
633 |
|
744 |
|
634 |
if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) |
745 |
if (vtnet_modern(sc) || sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) |
635 |
sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS; |
746 |
sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_INLINE; |
636 |
else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) |
747 |
else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) |
637 |
sc->vtnet_rx_nsegs = VTNET_MAX_RX_SEGS; |
748 |
sc->vtnet_rx_nsegs = VTNET_RX_SEGS_LRO_NOMRG; |
638 |
else |
749 |
else |
639 |
sc->vtnet_rx_nsegs = VTNET_MIN_RX_SEGS; |
750 |
sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_SEPARATE; |
640 |
|
751 |
|
|
|
752 |
/* |
753 |
* Favor "hardware" LRO if negotiated, but support software LRO as |
754 |
* a fallback; there is usually little benefit (or worse) with both. |
755 |
*/ |
756 |
if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) == 0 && |
757 |
virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6) == 0) |
758 |
sc->vtnet_flags |= VTNET_FLAG_SW_LRO; |
759 |
|
641 |
if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) || |
760 |
if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) || |
642 |
virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) || |
761 |
virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) || |
643 |
virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) |
762 |
virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) |
644 |
sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS; |
763 |
sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MAX; |
645 |
else |
764 |
else |
646 |
sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS; |
765 |
sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MIN; |
647 |
|
766 |
|
|
|
767 |
sc->vtnet_req_vq_pairs = 1; |
768 |
sc->vtnet_max_vq_pairs = 1; |
769 |
|
648 |
if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) { |
770 |
if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) { |
649 |
sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ; |
771 |
sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ; |
650 |
|
772 |
|
Lines 654-688
             sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
         if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
             sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
+
+        if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
+            sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
+                offsetof(struct virtio_net_config,
+                max_virtqueue_pairs));
+        }
     }
 
-    if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) &&
-        sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
-        sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
-            offsetof(struct virtio_net_config, max_virtqueue_pairs));
-    } else
-        sc->vtnet_max_vq_pairs = 1;
-
     if (sc->vtnet_max_vq_pairs > 1) {
+        int req;
+
         /*
-         * Limit the maximum number of queue pairs to the lower of
-         * the number of CPUs and the configured maximum.
-         * The actual number of queues that get used may be less.
+         * Limit the maximum number of requested queue pairs to the
+         * number of CPUs and the configured maximum.
          */
-        int max;
-
-        max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
-        if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN) {
-            if (max > mp_ncpus)
-                max = mp_ncpus;
-            if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
-                max = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX;
-            if (max > 1) {
-                sc->vtnet_requested_vq_pairs = max;
-                sc->vtnet_flags |= VTNET_FLAG_MULTIQ;
-            }
+        req = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
+        if (req < 0)
+            req = 1;
+        if (req == 0)
+            req = mp_ncpus;
+        if (req > sc->vtnet_max_vq_pairs)
+            req = sc->vtnet_max_vq_pairs;
+        if (req > mp_ncpus)
+            req = mp_ncpus;
+        if (req > 1) {
+            sc->vtnet_req_vq_pairs = req;
+            sc->vtnet_flags |= VTNET_FLAG_MQ;
         }
     }
+
+    return (0);
 }
 
 static int

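The clamping above reads more clearly as a standalone helper. For example, with hw.vtnet.mq_max_pairs=0, 8 CPUs, and a device reporting max_virtqueue_pairs=4, the result is 4 pairs and VTNET_FLAG_MQ is set. A hypothetical restatement (not in the patch, same arithmetic):

static int
example_clamp_vq_pairs(int req, int max_vq_pairs, int ncpus)
{
    if (req < 0)
        req = 1;                /* negative tunable: fall back to one pair */
    if (req == 0)
        req = ncpus;            /* zero means "one pair per CPU" */
    if (req > max_vq_pairs)
        req = max_vq_pairs;     /* never exceed what the device offers */
    if (req > ncpus)
        req = ncpus;            /* and never exceed the CPU count */
    return (req);
}
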
Lines 703-708
     if (rxq->vtnrx_sg == NULL)
         return (ENOMEM);
 
+#if defined(INET) || defined(INET6)
+    if (vtnet_software_lro(sc)) {
+        if (tcp_lro_init_args(&rxq->vtnrx_lro, sc->vtnet_ifp,
+            sc->vtnet_lro_entry_count, sc->vtnet_lro_mbufq_depth) != 0)
+            return (ENOMEM);
+    }
+#endif
+
     TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
     rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
         taskqueue_thread_enqueue, &rxq->vtnrx_tq);

Lines 768-773
             return (error);
     }
 
+    vtnet_set_rx_process_limit(sc);
     vtnet_setup_queue_sysctl(sc);
 
     return (0);

Lines 780-785
     rxq->vtnrx_sc = NULL;
     rxq->vtnrx_id = -1;
 
+#if defined(INET) || defined(INET6)
+    tcp_lro_free(&rxq->vtnrx_lro);
+#endif
+
     if (rxq->vtnrx_sg != NULL) {
         sglist_free(rxq->vtnrx_sg);
         rxq->vtnrx_sg = NULL;

Lines 888-915
     if (info == NULL)
         return (ENOMEM);
 
-    for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx+=2) {
+    for (i = 0, idx = 0; i < sc->vtnet_req_vq_pairs; i++, idx += 2) {
         rxq = &sc->vtnet_rxqs[i];
         VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
             vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
-            "%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id);
+            "%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);
 
         txq = &sc->vtnet_txqs[i];
         VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
             vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
-            "%s-%d tx", device_get_nameunit(dev), txq->vtntx_id);
+            "%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
     }
 
+    /* These queues will not be used so allocate the minimum resources. */
+    for (/**/; i < sc->vtnet_max_vq_pairs; i++, idx += 2) {
+        rxq = &sc->vtnet_rxqs[i];
+        VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, rxq, &rxq->vtnrx_vq,
+            "%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);
+
+        txq = &sc->vtnet_txqs[i];
+        VQ_ALLOC_INFO_INIT(&info[idx+1], 0, NULL, txq, &txq->vtntx_vq,
+            "%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
+    }
+
     if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
         VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
             &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
     }
 
     /*
-     * Enable interrupt binding if this is multiqueue. This only matters
-     * when per-vq MSIX is available.
+     * TODO: Enable interrupt binding if this is multiqueue. This will
+     * only matter when per-virtqueue MSIX is available.
      */
-    if (sc->vtnet_flags & VTNET_FLAG_MULTIQ)
+    if (sc->vtnet_flags & VTNET_FLAG_MQ)
         flags |= 0;
 
     error = virtio_alloc_virtqueues(dev, flags, nvqs, info);

Lines 919-941
 }
 
 static int
-vtnet_setup_interface(struct vtnet_softc *sc)
+vtnet_alloc_interface(struct vtnet_softc *sc)
 {
     device_t dev;
     struct ifnet *ifp;
 
     dev = sc->vtnet_dev;
 
-    ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
-    if (ifp == NULL) {
-        device_printf(dev, "cannot allocate ifnet structure\n");
-        return (ENOSPC);
-    }
+    ifp = if_alloc(IFT_ETHER);
+    if (ifp == NULL)
+        return (ENOMEM);
 
-    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
-    ifp->if_baudrate = IF_Gbps(10); /* Approx. */
+    sc->vtnet_ifp = ifp;
     ifp->if_softc = sc;
+    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+
+    return (0);
+}
+
+static int
+vtnet_setup_interface(struct vtnet_softc *sc)
+{
+    device_t dev;
+    struct ifnet *ifp;
+
+    dev = sc->vtnet_dev;
+    ifp = sc->vtnet_ifp;
+
     ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+    ifp->if_baudrate = IF_Gbps(10);
     ifp->if_init = vtnet_init;
     ifp->if_ioctl = vtnet_ioctl;
     ifp->if_get_counter = vtnet_get_counter;

Link Here
|
950 |
IFQ_SET_READY(&ifp->if_snd); |
1110 |
IFQ_SET_READY(&ifp->if_snd); |
951 |
#endif |
1111 |
#endif |
952 |
|
1112 |
|
953 |
ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd, |
1113 |
vtnet_get_macaddr(sc); |
954 |
vtnet_ifmedia_sts); |
|
|
955 |
ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL); |
956 |
ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE); |
957 |
|
1114 |
|
958 |
/* Read (or generate) the MAC address for the adapter. */ |
|
|
959 |
vtnet_get_hwaddr(sc); |
960 |
|
961 |
ether_ifattach(ifp, sc->vtnet_hwaddr); |
962 |
|
963 |
if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) |
1115 |
if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) |
964 |
ifp->if_capabilities |= IFCAP_LINKSTATE; |
1116 |
ifp->if_capabilities |= IFCAP_LINKSTATE; |
965 |
|
1117 |
|
966 |
/* Tell the upper layer(s) we support long frames. */ |
1118 |
ifmedia_init(&sc->vtnet_media, 0, vtnet_ifmedia_upd, vtnet_ifmedia_sts); |
967 |
ifp->if_hdrlen = sizeof(struct ether_vlan_header); |
1119 |
ifmedia_add(&sc->vtnet_media, IFM_ETHER | IFM_AUTO, 0, NULL); |
968 |
ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; |
1120 |
ifmedia_set(&sc->vtnet_media, IFM_ETHER | IFM_AUTO); |
969 |
|
1121 |
|
970 |
if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) { |
1122 |
if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) { |
|
|
1123 |
int gso; |
1124 |
|
971 |
ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6; |
1125 |
ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6; |
972 |
|
1126 |
|
973 |
if (virtio_with_feature(dev, VIRTIO_NET_F_GSO)) { |
1127 |
gso = virtio_with_feature(dev, VIRTIO_NET_F_GSO); |
974 |
ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6; |
1128 |
if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4)) |
|
|
1129 |
ifp->if_capabilities |= IFCAP_TSO4; |
1130 |
if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) |
1131 |
ifp->if_capabilities |= IFCAP_TSO6; |
1132 |
if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN)) |
975 |
sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; |
1133 |
sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; |
976 |
} else { |
|
|
977 |
if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4)) |
978 |
ifp->if_capabilities |= IFCAP_TSO4; |
979 |
if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) |
980 |
ifp->if_capabilities |= IFCAP_TSO6; |
981 |
if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN)) |
982 |
sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; |
983 |
} |
984 |
|
1134 |
|
985 |
if (ifp->if_capabilities & IFCAP_TSO) |
1135 |
if (ifp->if_capabilities & (IFCAP_TSO4 | IFCAP_TSO6)) { |
|
|
1136 |
int tso_maxlen; |
1137 |
|
986 |
ifp->if_capabilities |= IFCAP_VLAN_HWTSO; |
1138 |
ifp->if_capabilities |= IFCAP_VLAN_HWTSO; |
|
|
1139 |
|
1140 |
tso_maxlen = vtnet_tunable_int(sc, "tso_maxlen", |
1141 |
vtnet_tso_maxlen); |
1142 |
ifp->if_hw_tsomax = tso_maxlen - |
1143 |
(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); |
1144 |
ifp->if_hw_tsomaxsegcount = sc->vtnet_tx_nsegs - 1; |
1145 |
ifp->if_hw_tsomaxsegsize = PAGE_SIZE; |
1146 |
} |
987 |
} |
1147 |
} |
988 |
|
1148 |
|
989 |
if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) { |
1149 |
if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) { |
990 |
ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6; |
1150 |
ifp->if_capabilities |= IFCAP_RXCSUM; |
|
|
1151 |
#ifdef notyet |
1152 |
/* BMV: Rx checksums not distinguished between IPv4 and IPv6. */ |
1153 |
ifp->if_capabilities |= IFCAP_RXCSUM_IPV6; |
1154 |
#endif |
991 |
|
1155 |
|
992 |
if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) || |
1156 |
if (vtnet_tunable_int(sc, "fixup_needs_csum", |
993 |
virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6)) |
1157 |
vtnet_fixup_needs_csum) != 0) |
994 |
ifp->if_capabilities |= IFCAP_LRO; |
1158 |
sc->vtnet_flags |= VTNET_FLAG_FIXUP_NEEDS_CSUM; |
|
|
1159 |
|
1160 |
/* Support either "hardware" or software LRO. */ |
1161 |
ifp->if_capabilities |= IFCAP_LRO; |
995 |
} |
1162 |
} |
996 |
|
1163 |
|
997 |
if (ifp->if_capabilities & IFCAP_HWCSUM) { |
1164 |
if (ifp->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6)) { |
998 |
/* |
1165 |
/* |
999 |
* VirtIO does not support VLAN tagging, but we can fake |
1166 |
* VirtIO does not support VLAN tagging, but we can fake |
1000 |
* it by inserting and removing the 802.1Q header during |
1167 |
* it by inserting and removing the 802.1Q header during |
Lines 1005-1015
Link Here
|
1005 |
IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; |
1172 |
IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; |
1006 |
} |
1173 |
} |
1007 |
|
1174 |
|
1008 |
ifp->if_capenable = ifp->if_capabilities; |
1175 |
if (sc->vtnet_max_mtu >= ETHERMTU_JUMBO) |
|
|
1176 |
ifp->if_capabilities |= IFCAP_JUMBO_MTU; |
1177 |
ifp->if_capabilities |= IFCAP_VLAN_MTU; |
1009 |
|
1178 |
|
1010 |
/* |
1179 |
/* |
1011 |
* Capabilities after here are not enabled by default. |
1180 |
* Capabilities after here are not enabled by default. |
1012 |
*/ |
1181 |
*/ |
|
|
1182 |
ifp->if_capenable = ifp->if_capabilities; |
1013 |
|
1183 |
|
1014 |
if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) { |
1184 |
if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) { |
1015 |
ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; |
1185 |
ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; |
Lines 1020-1076
         vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
     }
 
-    vtnet_set_rx_process_limit(sc);
-    vtnet_set_tx_intr_threshold(sc);
+    ether_ifattach(ifp, sc->vtnet_hwaddr);
 
+    /* Tell the upper layer(s) we support long frames. */
+    ifp->if_hdrlen = sizeof(struct ether_vlan_header);
+
     return (0);
 }
 
 static int
-vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
+vtnet_rx_cluster_size(struct vtnet_softc *sc, int mtu)
 {
+    int framesz;
+
+    if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
+        return (MJUMPAGESIZE);
+    else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
+        return (MCLBYTES);
+
+    /*
+     * Try to scale the receive mbuf cluster size from the MTU. Without
+     * the GUEST_TSO[46] features, the VirtIO specification says the
+     * driver must only be able to receive ~1500 byte frames. But if
+     * jumbo frames can be transmitted then try to receive jumbo.
+     *
+     * BMV: Not quite true when F_MTU is negotiated!
+     */
+    if (vtnet_modern(sc)) {
+        MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr_v1));
+        framesz = sizeof(struct virtio_net_hdr_v1);
+    } else
+        framesz = sizeof(struct vtnet_rx_header);
+    framesz += sizeof(struct ether_vlan_header) + mtu;
+
+    if (framesz <= MCLBYTES)
+        return (MCLBYTES);
+    else if (framesz <= MJUMPAGESIZE)
+        return (MJUMPAGESIZE);
+    else if (framesz <= MJUM9BYTES)
+        return (MJUM9BYTES);
+
+    /* Sane default; avoid 16KB clusters. */
+    return (MCLBYTES);
+}
+
+static int
+vtnet_ioctl_mtu(struct vtnet_softc *sc, int mtu)
+{
     struct ifnet *ifp;
-    int frame_size, clsize;
+    int clustersz;
 
     ifp = sc->vtnet_ifp;
+    VTNET_CORE_LOCK_ASSERT(sc);
 
-    if (new_mtu < ETHERMIN || new_mtu > VTNET_MAX_MTU)
+    if (ifp->if_mtu == mtu)
+        return (0);
+    else if (mtu < ETHERMIN || mtu > sc->vtnet_max_mtu)
         return (EINVAL);
 
-    frame_size = sc->vtnet_hdr_size + sizeof(struct ether_vlan_header) +
-        new_mtu;
+    ifp->if_mtu = mtu;
+    clustersz = vtnet_rx_cluster_size(sc, mtu);
 
-    /*
-     * Based on the new MTU (and hence frame size) determine which
-     * cluster size is most appropriate for the receive queues.
-     */
-    if (frame_size <= MCLBYTES) {
-        clsize = MCLBYTES;
-    } else if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
-        /* Avoid going past 9K jumbos. */
-        if (frame_size > MJUM9BYTES)
-            return (EINVAL);
-        clsize = MJUM9BYTES;
-    } else
-        clsize = MJUMPAGESIZE;
+    if (clustersz != sc->vtnet_rx_clustersz &&
+        ifp->if_drv_flags & IFF_DRV_RUNNING) {
+        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+        vtnet_init_locked(sc);
+    }
 
-    ifp->if_mtu = new_mtu;
-    sc->vtnet_rx_new_clsize = clsize;
+    return (0);
+}
 
-    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
-        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+static int
+vtnet_ioctl_ifflags(struct vtnet_softc *sc)
+{
+    struct ifnet *ifp;
+    int drv_running;
+
+    ifp = sc->vtnet_ifp;
+    drv_running = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
+
+    VTNET_CORE_LOCK_ASSERT(sc);
+
+    if ((ifp->if_flags & IFF_UP) == 0) {
+        if (drv_running)
+            vtnet_stop(sc);
+        goto out;
+    }
+
+    if (!drv_running) {
         vtnet_init_locked(sc);
+        goto out;
     }
 
+    if ((ifp->if_flags ^ sc->vtnet_if_flags) &
+        (IFF_PROMISC | IFF_ALLMULTI)) {
+        if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
+            return (ENOTSUP);
+        vtnet_rx_filter(sc);
+    }
+
+out:
+    sc->vtnet_if_flags = ifp->if_flags;
     return (0);
 }
 
 static int
+vtnet_ioctl_multi(struct vtnet_softc *sc)
+{
+    struct ifnet *ifp;
+
+    ifp = sc->vtnet_ifp;
+
+    VTNET_CORE_LOCK_ASSERT(sc);
+
+    if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX &&
+        ifp->if_drv_flags & IFF_DRV_RUNNING)
+        vtnet_rx_filter_mac(sc);
+
+    return (0);
+}
+
+static int
+vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr)
+{
+    struct ifnet *ifp;
+    int mask, reinit, update;
+
+    ifp = sc->vtnet_ifp;
+    mask = (ifr->ifr_reqcap & ifp->if_capabilities) ^ ifp->if_capenable;
+    reinit = update = 0;
+
+    VTNET_CORE_LOCK_ASSERT(sc);
+
+    if (mask & IFCAP_TXCSUM)
+        ifp->if_capenable ^= IFCAP_TXCSUM;
+    if (mask & IFCAP_TXCSUM_IPV6)
+        ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
+    if (mask & IFCAP_TSO4)
+        ifp->if_capenable ^= IFCAP_TSO4;
+    if (mask & IFCAP_TSO6)
+        ifp->if_capenable ^= IFCAP_TSO6;
+
+    if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) {
+        /*
+         * These Rx features require the negotiated features to
+         * be updated. Avoid a full reinit if possible.
+         */
+        if (sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
+            update = 1;
+        else
+            reinit = 1;
+
+        /* BMV: Avoid needless renegotiation for just software LRO. */
+        if ((mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) ==
+            IFCAP_LRO && vtnet_software_lro(sc))
+            reinit = update = 0;
+
+        if (mask & IFCAP_RXCSUM)
+            ifp->if_capenable ^= IFCAP_RXCSUM;
+        if (mask & IFCAP_RXCSUM_IPV6)
+            ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
+        if (mask & IFCAP_LRO)
+            ifp->if_capenable ^= IFCAP_LRO;
+
+        /*
+         * VirtIO does not distinguish between IPv4 and IPv6 checksums
+         * so treat them as a pair. Guest TSO (LRO) requires receive
+         * checksums.
+         */
+        if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
+            ifp->if_capenable |= IFCAP_RXCSUM;
+#ifdef notyet
+            ifp->if_capenable |= IFCAP_RXCSUM_IPV6;
+#endif
+        } else
+            ifp->if_capenable &=
+                ~(IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO);
+    }
+
+    if (mask & IFCAP_VLAN_HWFILTER) {
+        /* These Rx features require renegotiation. */
+        reinit = 1;
+
+        if (mask & IFCAP_VLAN_HWFILTER)
+            ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+    }
+
+    if (mask & IFCAP_VLAN_HWTSO)
+        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+    if (mask & IFCAP_VLAN_HWTAGGING)
+        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+
+    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+        if (reinit) {
+            ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+            vtnet_init_locked(sc);
+        } else if (update)
+            vtnet_update_rx_offloads(sc);
+    }
+
+    return (0);
+}
+
+static int
 vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 {
     struct vtnet_softc *sc;
     struct ifreq *ifr;
-    int reinit, mask, error;
+    int error;
 
     sc = ifp->if_softc;
     ifr = (struct ifreq *) data;

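Worked example of the new vtnet_rx_cluster_size() selection for a modern device without mergeable buffers or LRO_NOMRG (illustrative only; assumes the common 4 KB MJUMPAGESIZE):

/*
 * framesz = sizeof(struct virtio_net_hdr_v1) (12)
 *         + sizeof(struct ether_vlan_header) (18) + MTU
 *
 *   MTU 1500 -> 1530 <= MCLBYTES (2048)      -> MCLBYTES
 *   MTU 4000 -> 4030 <= MJUMPAGESIZE (4096)  -> MJUMPAGESIZE
 *   MTU 9000 -> 9030 <= MJUM9BYTES (9216)    -> MJUM9BYTES
 */
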
Lines 1078-1122
 
     switch (cmd) {
     case SIOCSIFMTU:
-        if (ifp->if_mtu != ifr->ifr_mtu) {
-            VTNET_CORE_LOCK(sc);
-            error = vtnet_change_mtu(sc, ifr->ifr_mtu);
-            VTNET_CORE_UNLOCK(sc);
-        }
+        VTNET_CORE_LOCK(sc);
+        error = vtnet_ioctl_mtu(sc, ifr->ifr_mtu);
+        VTNET_CORE_UNLOCK(sc);
         break;
 
     case SIOCSIFFLAGS:
         VTNET_CORE_LOCK(sc);
-        if ((ifp->if_flags & IFF_UP) == 0) {
-            if (ifp->if_drv_flags & IFF_DRV_RUNNING)
-                vtnet_stop(sc);
-        } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
-            if ((ifp->if_flags ^ sc->vtnet_if_flags) &
-                (IFF_PROMISC | IFF_ALLMULTI)) {
-                if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
-                    vtnet_rx_filter(sc);
-                else {
-                    ifp->if_flags |= IFF_PROMISC;
-                    if ((ifp->if_flags ^ sc->vtnet_if_flags)
-                        & IFF_ALLMULTI)
-                        error = ENOTSUP;
-                }
-            }
-        } else
-            vtnet_init_locked(sc);
-
-        if (error == 0)
-            sc->vtnet_if_flags = ifp->if_flags;
+        error = vtnet_ioctl_ifflags(sc);
         VTNET_CORE_UNLOCK(sc);
         break;
 
     case SIOCADDMULTI:
     case SIOCDELMULTI:
-        if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
-            break;
         VTNET_CORE_LOCK(sc);
-        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
-            vtnet_rx_filter_mac(sc);
+        error = vtnet_ioctl_multi(sc);
         VTNET_CORE_UNLOCK(sc);
         break;
 

Lines 1127-1172
 
     case SIOCSIFCAP:
         VTNET_CORE_LOCK(sc);
-        mask = ifr->ifr_reqcap ^ ifp->if_capenable;
-
-        if (mask & IFCAP_TXCSUM)
-            ifp->if_capenable ^= IFCAP_TXCSUM;
-        if (mask & IFCAP_TXCSUM_IPV6)
-            ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
-        if (mask & IFCAP_TSO4)
-            ifp->if_capenable ^= IFCAP_TSO4;
-        if (mask & IFCAP_TSO6)
-            ifp->if_capenable ^= IFCAP_TSO6;
-
-        if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
-            IFCAP_VLAN_HWFILTER)) {
-            /* These Rx features require us to renegotiate. */
-            reinit = 1;
-
-            if (mask & IFCAP_RXCSUM)
-                ifp->if_capenable ^= IFCAP_RXCSUM;
-            if (mask & IFCAP_RXCSUM_IPV6)
-                ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
-            if (mask & IFCAP_LRO)
-                ifp->if_capenable ^= IFCAP_LRO;
-            if (mask & IFCAP_VLAN_HWFILTER)
-                ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
-        } else
-            reinit = 0;
-
-        if (mask & IFCAP_VLAN_HWTSO)
-            ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
-        if (mask & IFCAP_VLAN_HWTAGGING)
-            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
-
-        if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
-            ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
-            vtnet_init_locked(sc);
-        }
-
+        error = vtnet_ioctl_ifcap(sc, ifr);
         VTNET_CORE_UNLOCK(sc);
         VLAN_CAPABILITIES(ifp);
-
         break;
 
     default:

Lines 1185-1196
|
1185 |
struct virtqueue *vq; |
1451 |
struct virtqueue *vq; |
1186 |
int nbufs, error; |
1452 |
int nbufs, error; |
1187 |
|
1453 |
|
1188 |
#ifdef DEV_NETMAP |
|
|
1189 |
error = vtnet_netmap_rxq_populate(rxq); |
1190 |
if (error >= 0) |
1191 |
return (error); |
1192 |
#endif /* DEV_NETMAP */ |
1193 |
|
1194 |
vq = rxq->vtnrx_vq; |
1454 |
vq = rxq->vtnrx_vq; |
1195 |
error = ENOSPC; |
1455 |
error = ENOSPC; |
1196 |
|
1456 |
|
Lines 1220-1239
|
1220 |
struct virtqueue *vq; |
1480 |
struct virtqueue *vq; |
1221 |
struct mbuf *m; |
1481 |
struct mbuf *m; |
1222 |
int last; |
1482 |
int last; |
1223 |
#ifdef DEV_NETMAP |
|
|
1224 |
int netmap_bufs = vtnet_netmap_queue_on(rxq->vtnrx_sc, NR_RX, |
1225 |
rxq->vtnrx_id); |
1226 |
#else /* !DEV_NETMAP */ |
1227 |
int netmap_bufs = 0; |
1228 |
#endif /* !DEV_NETMAP */ |
1229 |
|
1483 |
|
1230 |
vq = rxq->vtnrx_vq; |
1484 |
vq = rxq->vtnrx_vq; |
1231 |
last = 0; |
1485 |
last = 0; |
1232 |
|
1486 |
|
1233 |
while ((m = virtqueue_drain(vq, &last)) != NULL) { |
1487 |
while ((m = virtqueue_drain(vq, &last)) != NULL) |
1234 |
if (!netmap_bufs) |
1488 |
m_freem(m); |
1235 |
m_freem(m); |
|
|
1236 |
} |
1237 |
|
1489 |
|
1238 |
KASSERT(virtqueue_empty(vq), |
1490 |
KASSERT(virtqueue_empty(vq), |
1239 |
("%s: mbufs remaining in rx queue %p", __func__, rxq)); |
1491 |
("%s: mbufs remaining in rx queue %p", __func__, rxq)); |
Lines 1243-1299
|
1243 |
vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp) |
1495 |
vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp) |
1244 |
{ |
1496 |
{ |
1245 |
struct mbuf *m_head, *m_tail, *m; |
1497 |
struct mbuf *m_head, *m_tail, *m; |
1246 |
int i, clsize; |
1498 |
int i, size; |
1247 |
|
1499 |
|
1248 |
clsize = sc->vtnet_rx_clsize; |
1500 |
m_head = NULL; |
|
|
1501 |
size = sc->vtnet_rx_clustersz; |
1249 |
|
1502 |
|
1250 |
KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, |
1503 |
KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, |
1251 |
("%s: chained mbuf %d request without LRO_NOMRG", __func__, nbufs)); |
1504 |
("%s: mbuf %d chain requested without LRO_NOMRG", __func__, nbufs)); |
1252 |
|
1505 |
|
1253 |
m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize); |
1506 |
for (i = 0; i < nbufs; i++) { |
1254 |
if (m_head == NULL) |
1507 |
m = m_getjcl(M_NOWAIT, MT_DATA, i == 0 ? M_PKTHDR : 0, size); |
1255 |
goto fail; |
1508 |
if (m == NULL) { |
|
|
1509 |
sc->vtnet_stats.mbuf_alloc_failed++; |
1510 |
m_freem(m_head); |
1511 |
return (NULL); |
1512 |
} |
1256 |
|
1513 |
|
1257 |
m_head->m_len = clsize; |
1514 |
m->m_len = size; |
1258 |
m_tail = m_head; |
1515 |
if (m_head != NULL) { |
1259 |
|
1516 |
m_tail->m_next = m; |
1260 |
/* Allocate the rest of the chain. */ |
1517 |
m_tail = m; |
1261 |
for (i = 1; i < nbufs; i++) { |
1518 |
} else |
1262 |
m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize); |
1519 |
m_head = m_tail = m; |
1263 |
if (m == NULL) |
|
|
1264 |
goto fail; |
1265 |
|
1266 |
m->m_len = clsize; |
1267 |
m_tail->m_next = m; |
1268 |
m_tail = m; |
1269 |
} |
1520 |
} |
1270 |
|
1521 |
|
1271 |
if (m_tailp != NULL) |
1522 |
if (m_tailp != NULL) |
1272 |
*m_tailp = m_tail; |
1523 |
*m_tailp = m_tail; |
1273 |
|
1524 |
|
1274 |
return (m_head); |
1525 |
return (m_head); |
1275 |
|
|
|
1276 |
fail: |
1277 |
sc->vtnet_stats.mbuf_alloc_failed++; |
1278 |
m_freem(m_head); |
1279 |
|
1280 |
return (NULL); |
1281 |
} |
1526 |
} |
1282 |
|
1527 |
|
1283 |
/* |
1528 |
/* |
1284 |
* Slow path for when LRO without mergeable buffers is negotiated. |
1529 |
* Slow path for when LRO without mergeable buffers is negotiated. |
1285 |
*/ |
1530 |
*/ |
1286 |
static int |
1531 |
static int |
1287 |
vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *rxq, struct mbuf *m0, |
1532 |
vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *rxq, struct mbuf *m0, |
1288 |
int len0) |
1533 |
int len0) |
1289 |
{ |
1534 |
{ |
1290 |
struct vtnet_softc *sc; |
1535 |
struct vtnet_softc *sc; |
1291 |
struct mbuf *m, *m_prev; |
1536 |
struct mbuf *m, *m_prev, *m_new, *m_tail; |
1292 |
struct mbuf *m_new, *m_tail; |
1537 |
int len, clustersz, nreplace, error; |
1293 |
int len, clsize, nreplace, error; |
|
|
1294 |
|
1538 |
|
1295 |
sc = rxq->vtnrx_sc; |
1539 |
sc = rxq->vtnrx_sc; |
1296 |
clsize = sc->vtnet_rx_clsize; |
1540 |
clustersz = sc->vtnet_rx_clustersz; |
1297 |
|
1541 |
|
1298 |
m_prev = NULL; |
1542 |
m_prev = NULL; |
1299 |
m_tail = NULL; |
1543 |
m_tail = NULL; |
Lines 1303-1327
|
1303 |
len = len0; |
1547 |
len = len0; |
1304 |
|
1548 |
|
1305 |
/* |
1549 |
/* |
1306 |
* Since these mbuf chains are so large, we avoid allocating an |
1550 |
* Since these mbuf chains are so large, avoid allocating a complete |
1307 |
* entire replacement chain if possible. When the received frame |
1551 |
* replacement when the received frame did not consume the entire |
1308 |
* did not consume the entire chain, the unused mbufs are moved |
1552 |
* chain. Unused mbufs are moved to the tail of the replacement mbuf. |
1309 |
* to the replacement chain. |
|
|
1310 |
*/ |
1553 |
*/ |
1311 |
while (len > 0) { |
1554 |
while (len > 0) { |
1312 |
/* |
|
|
1313 |
* Something is seriously wrong if we received a frame |
1314 |
* larger than the chain. Drop it. |
1315 |
*/ |
1316 |
if (m == NULL) { |
1555 |
if (m == NULL) { |
1317 |
sc->vtnet_stats.rx_frame_too_large++; |
1556 |
sc->vtnet_stats.rx_frame_too_large++; |
1318 |
return (EMSGSIZE); |
1557 |
return (EMSGSIZE); |
1319 |
} |
1558 |
} |
1320 |
|
1559 |
|
1321 |
/* We always allocate the same cluster size. */ |
1560 |
/* |
1322 |
KASSERT(m->m_len == clsize, |
1561 |
* Every mbuf should have the expected cluster size since that |
1323 |
("%s: mbuf size %d is not the cluster size %d", |
1562 |
* is also used to allocate the replacements. |
1324 |
__func__, m->m_len, clsize)); |
1563 |
*/ |
|
|
1564 |
KASSERT(m->m_len == clustersz, |
1565 |
("%s: mbuf size %d not expected cluster size %d", __func__, |
1566 |
m->m_len, clustersz)); |
1325 |
|
1567 |
|
1326 |
m->m_len = MIN(m->m_len, len); |
1568 |
m->m_len = MIN(m->m_len, len); |
1327 |
len -= m->m_len; |
1569 |
len -= m->m_len; |
Lines 1331-1349
|
1331 |
nreplace++; |
1573 |
nreplace++; |
1332 |
} |
1574 |
} |
1333 |
|
1575 |
|
1334 |
KASSERT(nreplace <= sc->vtnet_rx_nmbufs, |
1576 |
KASSERT(nreplace > 0 && nreplace <= sc->vtnet_rx_nmbufs, |
1335 |
("%s: too many replacement mbufs %d max %d", __func__, nreplace, |
1577 |
("%s: invalid replacement mbuf count %d max %d", __func__, |
1336 |
sc->vtnet_rx_nmbufs)); |
1578 |
nreplace, sc->vtnet_rx_nmbufs)); |
1337 |
|
1579 |
|
1338 |
m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail); |
1580 |
m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail); |
1339 |
if (m_new == NULL) { |
1581 |
if (m_new == NULL) { |
1340 |
m_prev->m_len = clsize; |
1582 |
m_prev->m_len = clustersz; |
1341 |
return (ENOBUFS); |
1583 |
return (ENOBUFS); |
1342 |
} |
1584 |
} |
1343 |
|
1585 |
|
1344 |
/* |
1586 |
/* |
1345 |
* Move any unused mbufs from the received chain onto the end |
1587 |
* Move any unused mbufs from the received mbuf chain onto the |
1346 |
* of the new chain. |
1588 |
* end of the replacement chain. |
1347 |
*/ |
1589 |
*/ |
1348 |
if (m_prev->m_next != NULL) { |
1590 |
if (m_prev->m_next != NULL) { |
1349 |
m_tail->m_next = m_prev->m_next; |
1591 |
m_tail->m_next = m_prev->m_next; |
Lines 1353-1373
|
1353 |
error = vtnet_rxq_enqueue_buf(rxq, m_new); |
1595 |
error = vtnet_rxq_enqueue_buf(rxq, m_new); |
1354 |
if (error) { |
1596 |
if (error) { |
1355 |
/* |
1597 |
/* |
1356 |
* BAD! We could not enqueue the replacement mbuf chain. We |
1598 |
* The replacement is supposed to be a copy of the one |
1357 |
* must restore the m0 chain to the original state if it was |
1599 |
* dequeued so this is a very unexpected error. |
1358 |
* modified so we can subsequently discard it. |
|
|
1359 |
* |
1600 |
* |
1360 |
* NOTE: The replacement is suppose to be an identical copy |
1601 |
* Restore the m0 chain to the original state if it was |
1361 |
* to the one just dequeued so this is an unexpected error. |
1602 |
* modified so we can then discard it. |
1362 |
*/ |
1603 |
*/ |
1363 |
sc->vtnet_stats.rx_enq_replacement_failed++; |
|
|
1364 |
|
1365 |
if (m_tail->m_next != NULL) { |
1604 |
if (m_tail->m_next != NULL) { |
1366 |
m_prev->m_next = m_tail->m_next; |
1605 |
m_prev->m_next = m_tail->m_next; |
1367 |
m_tail->m_next = NULL; |
1606 |
m_tail->m_next = NULL; |
1368 |
} |
1607 |
} |
1369 |
|
1608 |
m_prev->m_len = clustersz; |
1370 |
m_prev->m_len = clsize; |
1609 |
sc->vtnet_stats.rx_enq_replacement_failed++; |
1371 |
m_freem(m_new); |
1610 |
m_freem(m_new); |
1372 |
} |
1611 |
} |
1373 |
|
1612 |
|
Lines 1383-1413
|
1383 |
|
1622 |
|
1384 |
sc = rxq->vtnrx_sc; |
1623 |
sc = rxq->vtnrx_sc; |
1385 |
|
1624 |
|
1386 |
KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL, |
1625 |
if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) |
1387 |
("%s: chained mbuf without LRO_NOMRG", __func__)); |
1626 |
return (vtnet_rxq_replace_lro_nomrg_buf(rxq, m, len)); |
1388 |
|
1627 |
|
1389 |
if (m->m_next == NULL) { |
1628 |
MPASS(m->m_next == NULL); |
1390 |
/* Fast-path for the common case of just one mbuf. */ |
1629 |
if (m->m_len < len) |
1391 |
if (m->m_len < len) |
1630 |
return (EMSGSIZE); |
1392 |
return (EINVAL); |
|
|
1393 |
|
1631 |
|
1394 |
m_new = vtnet_rx_alloc_buf(sc, 1, NULL); |
1632 |
m_new = vtnet_rx_alloc_buf(sc, 1, NULL); |
1395 |
if (m_new == NULL) |
1633 |
if (m_new == NULL) |
1396 |
return (ENOBUFS); |
1634 |
return (ENOBUFS); |
1397 |
|
1635 |
|
1398 |
error = vtnet_rxq_enqueue_buf(rxq, m_new); |
1636 |
error = vtnet_rxq_enqueue_buf(rxq, m_new); |
1399 |
if (error) { |
1637 |
if (error) { |
1400 |
/* |
1638 |
sc->vtnet_stats.rx_enq_replacement_failed++; |
1401 |
* The new mbuf is suppose to be an identical |
1639 |
m_freem(m_new); |
1402 |
* copy of the one just dequeued so this is an |
|
|
1403 |
* unexpected error. |
1404 |
*/ |
1405 |
m_freem(m_new); |
1406 |
sc->vtnet_stats.rx_enq_replacement_failed++; |
1407 |
} else |
1408 |
m->m_len = len; |
1409 |
} else |
1640 |
} else |
1410 |
error = vtnet_rxq_replace_lro_nomgr_buf(rxq, m, len); |
1641 |
m->m_len = len; |
1411 |
|
1642 |
|
1412 |
return (error); |
1643 |
return (error); |
1413 |
} |
1644 |
} |
Lines 1417-1455
|
1417 |
{ |
1648 |
{ |
1418 |
struct vtnet_softc *sc; |
1649 |
struct vtnet_softc *sc; |
1419 |
struct sglist *sg; |
1650 |
struct sglist *sg; |
1420 |
struct vtnet_rx_header *rxhdr; |
1651 |
int header_inlined, error; |
1421 |
uint8_t *mdata; |
|
|
1422 |
int offset, error; |
1423 |
|
1652 |
|
1424 |
sc = rxq->vtnrx_sc; |
1653 |
sc = rxq->vtnrx_sc; |
1425 |
sg = rxq->vtnrx_sg; |
1654 |
sg = rxq->vtnrx_sg; |
1426 |
mdata = mtod(m, uint8_t *); |
|
|
1427 |
|
1655 |
|
|
|
1656 |
KASSERT(m->m_next == NULL || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, |
1657 |
("%s: mbuf chain without LRO_NOMRG", __func__)); |
1428 |
VTNET_RXQ_LOCK_ASSERT(rxq); |
1658 |
VTNET_RXQ_LOCK_ASSERT(rxq); |
1429 |
KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL, |
|
|
1430 |
("%s: chained mbuf without LRO_NOMRG", __func__)); |
1431 |
KASSERT(m->m_len == sc->vtnet_rx_clsize, |
1432 |
("%s: unexpected cluster size %d/%d", __func__, m->m_len, |
1433 |
sc->vtnet_rx_clsize)); |
1434 |
|
1659 |
|
1435 |
sglist_reset(sg); |
1660 |
sglist_reset(sg); |
1436 |
if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { |
1661 |
header_inlined = vtnet_modern(sc) || |
|
|
1662 |
(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) != 0; /* TODO: ANY_LAYOUT */ |
1663 |
|
1664 |
if (header_inlined) |
1665 |
error = sglist_append_mbuf(sg, m); |
1666 |
else { |
1667 |
struct vtnet_rx_header *rxhdr = |
1668 |
mtod(m, struct vtnet_rx_header *); |
1437 |
MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr)); |
1669 |
MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr)); |
1438 |
rxhdr = (struct vtnet_rx_header *) mdata; |
|
|
1439 |
sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size); |
1440 |
offset = sizeof(struct vtnet_rx_header); |
1441 |
} else |
1442 |
offset = 0; |
1443 |
|
1670 |
|
1444 |
sglist_append(sg, mdata + offset, m->m_len - offset); |
1671 |
/* Append the header and remaining mbuf data. */ |
1445 |
if (m->m_next != NULL) { |
1672 |
error = sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size); |
1446 |
error = sglist_append_mbuf(sg, m->m_next); |
1673 |
if (error) |
1447 |
MPASS(error == 0); |
1674 |
return (error); |
|
|
1675 |
error = sglist_append(sg, &rxhdr[1], |
1676 |
m->m_len - sizeof(struct vtnet_rx_header)); |
1677 |
if (error) |
1678 |
return (error); |
1679 |
|
1680 |
if (m->m_next != NULL) |
1681 |
error = sglist_append_mbuf(sg, m->m_next); |
1448 |
} |
1682 |
} |
1449 |
|
1683 |
|
1450 |
error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg); |
1684 |
if (error) |
|
|
1685 |
return (error); |
1451 |
|
1686 |
|
1452 |
return (error); |
1687 |
return (virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg)); |
1453 |
} |
1688 |
} |
1454 |
|
1689 |
|
1455 |
static int |
1690 |
static int |
Lines 1472-1525
|
1472 |
return (error); |
1707 |
return (error); |
1473 |
} |
1708 |
} |
1474 |
|
1709 |
|
1475 |
/* |
|
|
1476 |
* Use the checksum offset in the VirtIO header to set the |
1477 |
* correct CSUM_* flags. |
1478 |
*/ |
1479 |
static int |
1710 |
static int |
1480 |
vtnet_rxq_csum_by_offset(struct vtnet_rxq *rxq, struct mbuf *m, |
1711 |
vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t etype, |
1481 |
uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr) |
1712 |
int hoff, struct virtio_net_hdr *hdr) |
1482 |
{ |
1713 |
{ |
1483 |
struct vtnet_softc *sc; |
1714 |
struct vtnet_softc *sc; |
1484 |
#if defined(INET) || defined(INET6) |
1715 |
int error; |
1485 |
int offset = hdr->csum_start + hdr->csum_offset; |
|
|
1486 |
#endif |
1487 |
|
1716 |
|
1488 |
sc = rxq->vtnrx_sc; |
1717 |
sc = rxq->vtnrx_sc; |
1489 |
|
1718 |
|
1490 |
/* Only do a basic sanity check on the offset. */ |
1719 |
/* |
1491 |
switch (eth_type) { |
1720 |
* NEEDS_CSUM corresponds to Linux's CHECKSUM_PARTIAL, but FreeBSD does |
1492 |
#if defined(INET) |
1721 |
* not have an analogous CSUM flag. The checksum has been validated, |
1493 |
case ETHERTYPE_IP: |
1722 |
* but is incomplete (TCP/UDP pseudo header). |
1494 |
if (__predict_false(offset < ip_start + sizeof(struct ip))) |
1723 |
* |
1495 |
return (1); |
1724 |
* The packet is likely from another VM on the same host that itself |
1496 |
break; |
1725 |
* performed checksum offloading so Tx/Rx is basically a memcpy and |
1497 |
#endif |
1726 |
* the checksum has little value. |
1498 |
#if defined(INET6) |
1727 |
* |
1499 |
case ETHERTYPE_IPV6: |
1728 |
* Default to receiving the packet as-is for performance reasons, but |
1500 |
if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr))) |
1729 |
* this can cause issues if the packet is to be forwarded because it |
1501 |
return (1); |
1730 |
* does not contain a valid checksum. This patch may be helpful: |
1502 |
break; |
1731 |
* https://reviews.freebsd.org/D6611. In the meantime, have the driver |
1503 |
#endif |
1732 |
* compute the checksum if requested. |
1504 |
default: |
1733 |
* |
1505 |
sc->vtnet_stats.rx_csum_bad_ethtype++; |
1734 |
* BMV: Need to add a CSUM_PARTIAL flag? |
1506 |
return (1); |
1735 |
*/ |
|
|
1736 |
if ((sc->vtnet_flags & VTNET_FLAG_FIXUP_NEEDS_CSUM) == 0) { |
1737 |
error = vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr); |
1738 |
return (error); |
1507 |
} |
1739 |
} |
1508 |
|
1740 |
|
1509 |
/* |
1741 |
/* |
1510 |
* Use the offset to determine the appropriate CSUM_* flags. This is |
1742 |
* Compute the checksum in the driver so the packet will contain a |
1511 |
* a bit dirty, but we can get by with it since the checksum offsets |
1743 |
* valid checksum. The checksum is at csum_offset from csum_start. |
1512 |
* happen to be different. We assume the host host does not do IPv4 |
|
|
1513 |
* header checksum offloading. |
1514 |
*/ |
1744 |
*/ |
1515 |
switch (hdr->csum_offset) { |
1745 |
switch (etype) { |
1516 |
case offsetof(struct udphdr, uh_sum): |
1746 |
#if defined(INET) || defined(INET6) |
1517 |
case offsetof(struct tcphdr, th_sum): |
1747 |
case ETHERTYPE_IP: |
|
|
1748 |
case ETHERTYPE_IPV6: { |
1749 |
int csum_off, csum_end; |
1750 |
uint16_t csum; |
1751 |
|
1752 |
csum_off = hdr->csum_start + hdr->csum_offset; |
1753 |
csum_end = csum_off + sizeof(uint16_t); |
1754 |
|
1755 |
/* Assume checksum will be in the first mbuf. */ |
1756 |
if (m->m_len < csum_end || m->m_pkthdr.len < csum_end) |
1757 |
return (1); |
1758 |
|
1759 |
/* |
1760 |
* Like in_delayed_cksum()/in6_delayed_cksum(), compute the |
1761 |
* checksum and write it at the specified offset. We could |
1762 |
* try to verify the packet: csum_start should probably |
1763 |
* correspond to the start of the TCP/UDP header. |
1764 |
* |
1765 |
* BMV: Need to properly handle UDP with zero checksum. Is |
1766 |
* the IPv4 header checksum implicitly validated? |
1767 |
*/ |
1768 |
csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start); |
1769 |
*(uint16_t *)(mtodo(m, csum_off)) = csum; |
1518 |
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; |
1770 |
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; |
1519 |
m->m_pkthdr.csum_data = 0xFFFF; |
1771 |
m->m_pkthdr.csum_data = 0xFFFF; |
1520 |
break; |
1772 |
break; |
|
|
1773 |
} |
1774 |
#endif |
1521 |
default: |
1775 |
default: |
1522 |
sc->vtnet_stats.rx_csum_bad_offset++; |
1776 |
sc->vtnet_stats.rx_csum_bad_ethtype++; |
1523 |
return (1); |
1777 |
return (1); |
1524 |
} |
1778 |
} |
1525 |
|
1779 |
|
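Reviewer note: the new NEEDS_CSUM path above finishes the partial checksum in the driver. A minimal standalone sketch of that fix-up, assuming (as the new code does) that the 16-bit checksum field lies within the first mbuf; this is illustrative only, not the patch text:

static int
example_complete_partial_csum(struct mbuf *m, int csum_start, int csum_offset)
{
	int csum_off, csum_end;
	uint16_t csum;

	csum_off = csum_start + csum_offset;
	csum_end = csum_off + sizeof(uint16_t);

	/* The checksum field must be within the first mbuf. */
	if (m->m_len < csum_end || m->m_pkthdr.len < csum_end)
		return (1);

	/* Sum the packet from csum_start and store it at the offset. */
	csum = in_cksum_skip(m, m->m_pkthdr.len, csum_start);
	*(uint16_t *)(mtodo(m, csum_off)) = csum;

	/* Report a verified TCP/UDP checksum to the stack. */
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	m->m_pkthdr.csum_data = 0xFFFF;
	return (0);
}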
Lines 1527-1590
|
1527 |
} |
1781 |
} |
1528 |
|
1782 |
|
1529 |
static int |
1783 |
static int |
1530 |
vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m, |
1784 |
vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m, |
1531 |
uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr) |
1785 |
uint16_t etype, int hoff, struct virtio_net_hdr *hdr) |
1532 |
{ |
1786 |
{ |
1533 |
struct vtnet_softc *sc; |
1787 |
struct vtnet_softc *sc; |
1534 |
int offset, proto; |
1788 |
int protocol; |
1535 |
|
1789 |
|
1536 |
sc = rxq->vtnrx_sc; |
1790 |
sc = rxq->vtnrx_sc; |
1537 |
|
1791 |
|
1538 |
switch (eth_type) { |
1792 |
switch (etype) { |
1539 |
#if defined(INET) |
1793 |
#if defined(INET) |
1540 |
case ETHERTYPE_IP: { |
1794 |
case ETHERTYPE_IP: |
1541 |
struct ip *ip; |
1795 |
if (__predict_false(m->m_len < hoff + sizeof(struct ip))) |
1542 |
if (__predict_false(m->m_len < ip_start + sizeof(struct ip))) |
1796 |
protocol = IPPROTO_DONE; |
1543 |
return (1); |
1797 |
else { |
1544 |
ip = (struct ip *)(m->m_data + ip_start); |
1798 |
struct ip *ip = (struct ip *)(m->m_data + hoff); |
1545 |
proto = ip->ip_p; |
1799 |
protocol = ip->ip_p; |
1546 |
offset = ip_start + (ip->ip_hl << 2); |
1800 |
} |
1547 |
break; |
1801 |
break; |
1548 |
} |
|
|
1549 |
#endif |
1802 |
#endif |
1550 |
#if defined(INET6) |
1803 |
#if defined(INET6) |
1551 |
case ETHERTYPE_IPV6: |
1804 |
case ETHERTYPE_IPV6: |
1552 |
if (__predict_false(m->m_len < ip_start + |
1805 |
if (__predict_false(m->m_len < hoff + sizeof(struct ip6_hdr)) |
1553 |
sizeof(struct ip6_hdr))) |
1806 |
|| ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0) |
1554 |
return (1); |
1807 |
protocol = IPPROTO_DONE; |
1555 |
offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto); |
|
|
1556 |
if (__predict_false(offset < 0)) |
1557 |
return (1); |
1558 |
break; |
1808 |
break; |
1559 |
#endif |
1809 |
#endif |
1560 |
default: |
1810 |
default: |
1561 |
sc->vtnet_stats.rx_csum_bad_ethtype++; |
1811 |
protocol = IPPROTO_DONE; |
1562 |
return (1); |
1812 |
break; |
1563 |
} |
1813 |
} |
1564 |
|
1814 |
|
1565 |
switch (proto) { |
1815 |
switch (protocol) { |
1566 |
case IPPROTO_TCP: |
1816 |
case IPPROTO_TCP: |
1567 |
if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) |
|
|
1568 |
return (1); |
1569 |
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; |
1570 |
m->m_pkthdr.csum_data = 0xFFFF; |
1571 |
break; |
1572 |
case IPPROTO_UDP: |
1817 |
case IPPROTO_UDP: |
1573 |
if (__predict_false(m->m_len < offset + sizeof(struct udphdr))) |
|
|
1574 |
return (1); |
1575 |
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; |
1818 |
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; |
1576 |
m->m_pkthdr.csum_data = 0xFFFF; |
1819 |
m->m_pkthdr.csum_data = 0xFFFF; |
1577 |
break; |
1820 |
break; |
1578 |
default: |
1821 |
default: |
1579 |
/* |
1822 |
/* |
1580 |
* For the remaining protocols, FreeBSD does not support |
1823 |
* FreeBSD does not support checksum offloading of this |
1581 |
* checksum offloading, so the checksum will be recomputed. |
1824 |
* protocol. Let the stack re-verify the checksum later |
|
|
1825 |
* if the protocol is supported. |
1582 |
*/ |
1826 |
*/ |
1583 |
#if 0 |
1827 |
#if 0 |
1584 |
if_printf(sc->vtnet_ifp, "cksum offload of unsupported " |
1828 |
if_printf(sc->vtnet_ifp, |
1585 |
"protocol eth_type=%#x proto=%d csum_start=%d " |
1829 |
"%s: checksum offload of unsupported protocol " |
1586 |
"csum_offset=%d\n", __func__, eth_type, proto, |
1830 |
"etype=%#x protocol=%d csum_start=%d csum_offset=%d\n", |
1587 |
hdr->csum_start, hdr->csum_offset); |
1831 |
__func__, etype, protocol, hdr->csum_start, |
|
|
1832 |
hdr->csum_offset); |
1588 |
#endif |
1833 |
#endif |
1589 |
break; |
1834 |
break; |
1590 |
} |
1835 |
} |
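Reviewer note: the IPv6 case above determines the final transport protocol with ip6_lasthdr(), defaulting to IPPROTO_DONE when it cannot. A small sketch of that lookup in isolation (illustrative only):

static int
example_ipv6_l4_protocol(struct mbuf *m, int hoff)
{
	int protocol;

	/*
	 * Walk the IPv6 extension header chain starting at hoff (the end
	 * of the Ethernet header) to find the final transport protocol.
	 * IPPROTO_DONE means the protocol could not be determined.
	 */
	if (__predict_false(m->m_len < hoff + sizeof(struct ip6_hdr)) ||
	    ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0)
		protocol = IPPROTO_DONE;

	return (protocol);
}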
Lines 1592-1632
|
1592 |
return (0); |
1837 |
return (0); |
1593 |
} |
1838 |
} |
1594 |
|
1839 |
|
1595 |
/* |
|
|
1596 |
* Set the appropriate CSUM_* flags. Unfortunately, the information |
1597 |
* provided is not directly useful to us. The VirtIO header gives the |
1598 |
* offset of the checksum, which is all Linux needs, but this is not |
1599 |
* how FreeBSD does things. We are forced to peek inside the packet |
1600 |
* a bit. |
1601 |
* |
1602 |
* It would be nice if VirtIO gave us the L4 protocol or if FreeBSD |
1603 |
* could accept the offsets and let the stack figure it out. |
1604 |
*/ |
1605 |
static int |
1840 |
static int |
1606 |
vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m, |
1841 |
vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m, |
1607 |
struct virtio_net_hdr *hdr) |
1842 |
struct virtio_net_hdr *hdr) |
1608 |
{ |
1843 |
{ |
1609 |
struct ether_header *eh; |
1844 |
const struct ether_header *eh; |
1610 |
struct ether_vlan_header *evh; |
1845 |
int hoff; |
1611 |
uint16_t eth_type; |
1846 |
uint16_t etype; |
1612 |
int offset, error; |
|
|
1613 |
|
1847 |
|
1614 |
eh = mtod(m, struct ether_header *); |
1848 |
eh = mtod(m, const struct ether_header *); |
1615 |
eth_type = ntohs(eh->ether_type); |
1849 |
etype = ntohs(eh->ether_type); |
1616 |
if (eth_type == ETHERTYPE_VLAN) { |
1850 |
if (etype == ETHERTYPE_VLAN) { |
1617 |
/* BMV: We should handle nested VLAN tags too. */ |
1851 |
/* TODO BMV: Handle QinQ. */ |
1618 |
evh = mtod(m, struct ether_vlan_header *); |
1852 |
const struct ether_vlan_header *evh = |
1619 |
eth_type = ntohs(evh->evl_proto); |
1853 |
mtod(m, const struct ether_vlan_header *); |
1620 |
offset = sizeof(struct ether_vlan_header); |
1854 |
etype = ntohs(evh->evl_proto); |
|
|
1855 |
hoff = sizeof(struct ether_vlan_header); |
1621 |
} else |
1856 |
} else |
1622 |
offset = sizeof(struct ether_header); |
1857 |
hoff = sizeof(struct ether_header); |
1623 |
|
1858 |
|
1624 |
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) |
1859 |
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) |
1625 |
error = vtnet_rxq_csum_by_offset(rxq, m, eth_type, offset, hdr); |
1860 |
return (vtnet_rxq_csum_needs_csum(rxq, m, etype, hoff, hdr)); |
1626 |
else |
1861 |
else /* VIRTIO_NET_HDR_F_DATA_VALID */ |
1627 |
error = vtnet_rxq_csum_by_parse(rxq, m, eth_type, offset, hdr); |
1862 |
return (vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr)); |
1628 |
|
|
|
1629 |
return (error); |
1630 |
} |
1863 |
} |
1631 |
|
1864 |
|
1632 |
static void |
1865 |
static void |
Lines 1661-1674
Link Here
|
1661 |
{ |
1894 |
{ |
1662 |
struct vtnet_softc *sc; |
1895 |
struct vtnet_softc *sc; |
1663 |
struct virtqueue *vq; |
1896 |
struct virtqueue *vq; |
1664 |
struct mbuf *m, *m_tail; |
1897 |
struct mbuf *m_tail; |
1665 |
int len; |
|
|
1666 |
|
1898 |
|
1667 |
sc = rxq->vtnrx_sc; |
1899 |
sc = rxq->vtnrx_sc; |
1668 |
vq = rxq->vtnrx_vq; |
1900 |
vq = rxq->vtnrx_vq; |
1669 |
m_tail = m_head; |
1901 |
m_tail = m_head; |
1670 |
|
1902 |
|
1671 |
while (--nbufs > 0) { |
1903 |
while (--nbufs > 0) { |
|
|
1904 |
struct mbuf *m; |
1905 |
int len; |
1906 |
|
1672 |
m = virtqueue_dequeue(vq, &len); |
1907 |
m = virtqueue_dequeue(vq, &len); |
1673 |
if (m == NULL) { |
1908 |
if (m == NULL) { |
1674 |
rxq->vtnrx_stats.vrxs_ierrors++; |
1909 |
rxq->vtnrx_stats.vrxs_ierrors++; |
Lines 1703-1721
|
1703 |
return (1); |
1938 |
return (1); |
1704 |
} |
1939 |
} |
1705 |
|
1940 |
|
|
|
1941 |
#if defined(INET) || defined(INET6) |
1942 |
static int |
1943 |
vtnet_lro_rx(struct vtnet_rxq *rxq, struct mbuf *m) |
1944 |
{ |
1945 |
struct lro_ctrl *lro; |
1946 |
|
1947 |
lro = &rxq->vtnrx_lro; |
1948 |
|
1949 |
if (lro->lro_mbuf_max != 0) { |
1950 |
tcp_lro_queue_mbuf(lro, m); |
1951 |
return (0); |
1952 |
} |
1953 |
|
1954 |
return (tcp_lro_rx(lro, m, 0)); |
1955 |
} |
1956 |
#endif |
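Reviewer note: vtnet_lro_rx() above selects between the batched and the direct software LRO paths, with the flush happening later in the receive path. For context, a sketch of the usual lifecycle around such a helper (illustrative only; the setup and flush shown here are not part of this hunk):

/* Set up software LRO for a queue (typically at queue init time). */
static int
example_lro_setup(struct lro_ctrl *lro, struct ifnet *ifp)
{
	int error;

	error = tcp_lro_init(lro);
	if (error == 0)
		lro->ifp = ifp;
	return (error);
}

/* Per-packet path: queue for sorting, or merge immediately. */
static int
example_lro_input(struct lro_ctrl *lro, struct mbuf *m)
{
	if (lro->lro_mbuf_max != 0) {
		tcp_lro_queue_mbuf(lro, m);
		return (0);
	}
	return (tcp_lro_rx(lro, m, 0));
}

/* End of a receive burst: push merged packets up the stack. */
static void
example_lro_flush(struct lro_ctrl *lro)
{
	tcp_lro_flush_all(lro);
}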
1957 |
|
1706 |
static void |
1958 |
static void |
1707 |
vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m, |
1959 |
vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m, |
1708 |
struct virtio_net_hdr *hdr) |
1960 |
struct virtio_net_hdr *hdr) |
1709 |
{ |
1961 |
{ |
1710 |
struct vtnet_softc *sc; |
1962 |
struct vtnet_softc *sc; |
1711 |
struct ifnet *ifp; |
1963 |
struct ifnet *ifp; |
1712 |
struct ether_header *eh; |
|
|
1713 |
|
1964 |
|
1714 |
sc = rxq->vtnrx_sc; |
1965 |
sc = rxq->vtnrx_sc; |
1715 |
ifp = sc->vtnet_ifp; |
1966 |
ifp = sc->vtnet_ifp; |
1716 |
|
1967 |
|
1717 |
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { |
1968 |
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { |
1718 |
eh = mtod(m, struct ether_header *); |
1969 |
struct ether_header *eh = mtod(m, struct ether_header *); |
1719 |
if (eh->ether_type == htons(ETHERTYPE_VLAN)) { |
1970 |
if (eh->ether_type == htons(ETHERTYPE_VLAN)) { |
1720 |
vtnet_vlan_tag_remove(m); |
1971 |
vtnet_vlan_tag_remove(m); |
1721 |
/* |
1972 |
/* |
Lines 1730-1754
Link Here
|
1730 |
m->m_pkthdr.flowid = rxq->vtnrx_id; |
1981 |
m->m_pkthdr.flowid = rxq->vtnrx_id; |
1731 |
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); |
1982 |
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); |
1732 |
|
1983 |
|
1733 |
/* |
1984 |
if (hdr->flags & |
1734 |
* BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum |
1985 |
(VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) { |
1735 |
* distinction that Linux does. Need to reevaluate if performing |
|
|
1736 |
* offloading for the NEEDS_CSUM case is really appropriate. |
1737 |
*/ |
1738 |
if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM | |
1739 |
VIRTIO_NET_HDR_F_DATA_VALID)) { |
1740 |
if (vtnet_rxq_csum(rxq, m, hdr) == 0) |
1986 |
if (vtnet_rxq_csum(rxq, m, hdr) == 0) |
1741 |
rxq->vtnrx_stats.vrxs_csum++; |
1987 |
rxq->vtnrx_stats.vrxs_csum++; |
1742 |
else |
1988 |
else |
1743 |
rxq->vtnrx_stats.vrxs_csum_failed++; |
1989 |
rxq->vtnrx_stats.vrxs_csum_failed++; |
1744 |
} |
1990 |
} |
1745 |
|
1991 |
|
|
|
1992 |
if (hdr->gso_size != 0) { |
1993 |
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { |
1994 |
case VIRTIO_NET_HDR_GSO_TCPV4: |
1995 |
case VIRTIO_NET_HDR_GSO_TCPV6: |
1996 |
// not available in 11.x mbuf |
1997 |
// m->m_pkthdr.lro_nsegs = |
1998 |
// howmany(m->m_pkthdr.len, hdr->gso_size); |
1999 |
rxq->vtnrx_stats.vrxs_host_lro++; |
2000 |
break; |
2001 |
} |
2002 |
} |
2003 |
|
1746 |
rxq->vtnrx_stats.vrxs_ipackets++; |
2004 |
rxq->vtnrx_stats.vrxs_ipackets++; |
1747 |
rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len; |
2005 |
rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len; |
1748 |
|
2006 |
|
1749 |
VTNET_RXQ_UNLOCK(rxq); |
2007 |
#if defined(INET) || defined(INET6) |
|
|
2008 |
if (vtnet_software_lro(sc) && ifp->if_capenable & IFCAP_LRO) { |
2009 |
if (vtnet_lro_rx(rxq, m) == 0) |
2010 |
return; |
2011 |
} |
2012 |
#endif |
2013 |
|
1750 |
(*ifp->if_input)(ifp, m); |
2014 |
(*ifp->if_input)(ifp, m); |
1751 |
VTNET_RXQ_LOCK(rxq); |
|
|
1752 |
} |
2015 |
} |
1753 |
|
2016 |
|
1754 |
static int |
2017 |
static int |
Lines 1758-1777
Link Here
|
1758 |
struct vtnet_softc *sc; |
2021 |
struct vtnet_softc *sc; |
1759 |
struct ifnet *ifp; |
2022 |
struct ifnet *ifp; |
1760 |
struct virtqueue *vq; |
2023 |
struct virtqueue *vq; |
1761 |
struct mbuf *m; |
2024 |
int deq, count; |
1762 |
struct virtio_net_hdr_mrg_rxbuf *mhdr; |
|
|
1763 |
int len, deq, nbufs, adjsz, count; |
1764 |
|
2025 |
|
1765 |
sc = rxq->vtnrx_sc; |
2026 |
sc = rxq->vtnrx_sc; |
1766 |
vq = rxq->vtnrx_vq; |
2027 |
vq = rxq->vtnrx_vq; |
1767 |
ifp = sc->vtnet_ifp; |
2028 |
ifp = sc->vtnet_ifp; |
1768 |
hdr = &lhdr; |
|
|
1769 |
deq = 0; |
2029 |
deq = 0; |
1770 |
count = sc->vtnet_rx_process_limit; |
2030 |
count = sc->vtnet_rx_process_limit; |
1771 |
|
2031 |
|
1772 |
VTNET_RXQ_LOCK_ASSERT(rxq); |
2032 |
VTNET_RXQ_LOCK_ASSERT(rxq); |
1773 |
|
2033 |
|
|
|
2034 |
#ifdef DEV_NETMAP |
2035 |
if (netmap_rx_irq(ifp, 0, &deq)) |
2036 |
return (0); |
2037 |
#endif |
2038 |
|
1774 |
while (count-- > 0) { |
2039 |
while (count-- > 0) { |
|
|
2040 |
struct mbuf *m; |
2041 |
int len, nbufs, adjsz; |
2042 |
|
1775 |
m = virtqueue_dequeue(vq, &len); |
2043 |
m = virtqueue_dequeue(vq, &len); |
1776 |
if (m == NULL) |
2044 |
if (m == NULL) |
1777 |
break; |
2045 |
break; |
Lines 1783-1800
Link Here
|
1783 |
continue; |
2051 |
continue; |
1784 |
} |
2052 |
} |
1785 |
|
2053 |
|
1786 |
if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { |
2054 |
if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) { |
|
|
2055 |
struct virtio_net_hdr_mrg_rxbuf *mhdr = |
2056 |
mtod(m, struct virtio_net_hdr_mrg_rxbuf *); |
2057 |
nbufs = vtnet_htog16(sc, mhdr->num_buffers); |
2058 |
adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
2059 |
} else if (vtnet_modern(sc)) { |
2060 |
nbufs = 1; /* num_buffers is always 1 */ |
2061 |
adjsz = sizeof(struct virtio_net_hdr_v1); |
2062 |
} else { |
1787 |
nbufs = 1; |
2063 |
nbufs = 1; |
1788 |
adjsz = sizeof(struct vtnet_rx_header); |
2064 |
adjsz = sizeof(struct vtnet_rx_header); |
1789 |
/* |
2065 |
/* |
1790 |
* Account for our pad inserted between the header |
2066 |
* Account for our gap between the header and start of |
1791 |
* and the actual start of the frame. |
2067 |
* data to keep the segments separated. |
1792 |
*/ |
2068 |
*/ |
1793 |
len += VTNET_RX_HEADER_PAD; |
2069 |
len += VTNET_RX_HEADER_PAD; |
1794 |
} else { |
|
|
1795 |
mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *); |
1796 |
nbufs = mhdr->num_buffers; |
1797 |
adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
1798 |
} |
2070 |
} |
1799 |
|
2071 |
|
1800 |
if (vtnet_rxq_replace_buf(rxq, m, len) != 0) { |
2072 |
if (vtnet_rxq_replace_buf(rxq, m, len) != 0) { |
Lines 1816-1841
Link Here
|
1816 |
} |
2088 |
} |
1817 |
|
2089 |
|
1818 |
/* |
2090 |
/* |
1819 |
* Save copy of header before we strip it. For both mergeable |
2091 |
* Save an endian swapped version of the header prior to it |
1820 |
* and non-mergeable, the header is at the beginning of the |
2092 |
* being stripped. The header is always at the start of the |
1821 |
* mbuf data. We no longer need num_buffers, so always use a |
2093 |
* mbuf data. num_buffers was already saved (and not needed) |
1822 |
* regular header. |
2094 |
* so use the standard header. |
1823 |
* |
|
|
1824 |
* BMV: Is this memcpy() expensive? We know the mbuf data is |
1825 |
* still valid even after the m_adj(). |
1826 |
*/ |
2095 |
*/ |
1827 |
memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr)); |
2096 |
hdr = mtod(m, struct virtio_net_hdr *); |
|
|
2097 |
lhdr.flags = hdr->flags; |
2098 |
lhdr.gso_type = hdr->gso_type; |
2099 |
lhdr.hdr_len = vtnet_htog16(sc, hdr->hdr_len); |
2100 |
lhdr.gso_size = vtnet_htog16(sc, hdr->gso_size); |
2101 |
lhdr.csum_start = vtnet_htog16(sc, hdr->csum_start); |
2102 |
lhdr.csum_offset = vtnet_htog16(sc, hdr->csum_offset); |
1828 |
m_adj(m, adjsz); |
2103 |
m_adj(m, adjsz); |
1829 |
|
2104 |
|
1830 |
vtnet_rxq_input(rxq, m, hdr); |
2105 |
vtnet_rxq_input(rxq, m, &lhdr); |
1831 |
|
|
|
1832 |
/* Must recheck after dropping the Rx lock. */ |
1833 |
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) |
1834 |
break; |
1835 |
} |
2106 |
} |
1836 |
|
2107 |
|
1837 |
if (deq > 0) |
2108 |
if (deq > 0) { |
|
|
2109 |
#if defined(INET) || defined(INET6) |
2110 |
tcp_lro_flush_all(&rxq->vtnrx_lro); |
2111 |
#endif |
1838 |
virtqueue_notify(vq); |
2112 |
virtqueue_notify(vq); |
|
|
2113 |
} |
1839 |
|
2114 |
|
1840 |
return (count > 0 ? 0 : EAGAIN); |
2115 |
return (count > 0 ? 0 : EAGAIN); |
1841 |
} |
2116 |
} |
Lines 1864-1874
|
1864 |
return; |
2139 |
return; |
1865 |
} |
2140 |
} |
1866 |
|
2141 |
|
1867 |
#ifdef DEV_NETMAP |
|
|
1868 |
if (netmap_rx_irq(ifp, rxq->vtnrx_id, &more) != NM_IRQ_PASS) |
1869 |
return; |
1870 |
#endif /* DEV_NETMAP */ |
1871 |
|
1872 |
VTNET_RXQ_LOCK(rxq); |
2142 |
VTNET_RXQ_LOCK(rxq); |
1873 |
|
2143 |
|
1874 |
again: |
2144 |
again: |
Lines 1888-1895
|
1888 |
if (tries++ < VTNET_INTR_DISABLE_RETRIES) |
2158 |
if (tries++ < VTNET_INTR_DISABLE_RETRIES) |
1889 |
goto again; |
2159 |
goto again; |
1890 |
|
2160 |
|
1891 |
VTNET_RXQ_UNLOCK(rxq); |
|
|
1892 |
rxq->vtnrx_stats.vrxs_rescheduled++; |
2161 |
rxq->vtnrx_stats.vrxs_rescheduled++; |
|
|
2162 |
VTNET_RXQ_UNLOCK(rxq); |
1893 |
taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); |
2163 |
taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); |
1894 |
} else |
2164 |
} else |
1895 |
VTNET_RXQ_UNLOCK(rxq); |
2165 |
VTNET_RXQ_UNLOCK(rxq); |
Lines 1919-1940
Link Here
|
1919 |
if (!more) |
2189 |
if (!more) |
1920 |
vtnet_rxq_disable_intr(rxq); |
2190 |
vtnet_rxq_disable_intr(rxq); |
1921 |
rxq->vtnrx_stats.vrxs_rescheduled++; |
2191 |
rxq->vtnrx_stats.vrxs_rescheduled++; |
|
|
2192 |
VTNET_RXQ_UNLOCK(rxq); |
1922 |
taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); |
2193 |
taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); |
1923 |
} |
2194 |
} else |
|
|
2195 |
VTNET_RXQ_UNLOCK(rxq); |
2196 |
} |
1924 |
|
2197 |
|
1925 |
VTNET_RXQ_UNLOCK(rxq); |
2198 |
static int |
|
|
2199 |
vtnet_txq_intr_threshold(struct vtnet_txq *txq) |
2200 |
{ |
2201 |
struct vtnet_softc *sc; |
2202 |
int threshold; |
2203 |
|
2204 |
sc = txq->vtntx_sc; |
2205 |
|
2206 |
/* |
2207 |
* The Tx interrupt is disabled until the queue free count falls |
2208 |
* below our threshold. Completed frames are drained from the Tx |
2209 |
* virtqueue before transmitting new frames and in the watchdog |
2210 |
* callout, so the frequency of Tx interrupts is greatly reduced, |
2211 |
* at the cost of not freeing mbufs as quickly as they otherwise |
2212 |
* would be. |
2213 |
*/ |
2214 |
threshold = virtqueue_size(txq->vtntx_vq) / 4; |
2215 |
|
2216 |
/* |
2217 |
* Without indirect descriptors, leave enough room for the most |
2218 |
* segments we handle. |
2219 |
*/ |
2220 |
if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 && |
2221 |
threshold < sc->vtnet_tx_nsegs) |
2222 |
threshold = sc->vtnet_tx_nsegs; |
2223 |
|
2224 |
return (threshold); |
1926 |
} |
2225 |
} |
1927 |
|
2226 |
|
1928 |
static int |
2227 |
static int |
1929 |
vtnet_txq_below_threshold(struct vtnet_txq *txq) |
2228 |
vtnet_txq_below_threshold(struct vtnet_txq *txq) |
1930 |
{ |
2229 |
{ |
1931 |
struct vtnet_softc *sc; |
|
|
1932 |
struct virtqueue *vq; |
2230 |
struct virtqueue *vq; |
1933 |
|
2231 |
|
1934 |
sc = txq->vtntx_sc; |
|
|
1935 |
vq = txq->vtntx_vq; |
2232 |
vq = txq->vtntx_vq; |
1936 |
|
2233 |
|
1937 |
return (virtqueue_nfree(vq) <= sc->vtnet_tx_intr_thresh); |
2234 |
return (virtqueue_nfree(vq) <= txq->vtntx_intr_threshold); |
1938 |
} |
2235 |
} |
1939 |
|
2236 |
|
1940 |
static int |
2237 |
static int |
Lines 1969-1989
Link Here
|
1969 |
struct virtqueue *vq; |
2266 |
struct virtqueue *vq; |
1970 |
struct vtnet_tx_header *txhdr; |
2267 |
struct vtnet_tx_header *txhdr; |
1971 |
int last; |
2268 |
int last; |
1972 |
#ifdef DEV_NETMAP |
|
|
1973 |
int netmap_bufs = vtnet_netmap_queue_on(txq->vtntx_sc, NR_TX, |
1974 |
txq->vtntx_id); |
1975 |
#else /* !DEV_NETMAP */ |
1976 |
int netmap_bufs = 0; |
1977 |
#endif /* !DEV_NETMAP */ |
1978 |
|
2269 |
|
1979 |
vq = txq->vtntx_vq; |
2270 |
vq = txq->vtntx_vq; |
1980 |
last = 0; |
2271 |
last = 0; |
1981 |
|
2272 |
|
1982 |
while ((txhdr = virtqueue_drain(vq, &last)) != NULL) { |
2273 |
while ((txhdr = virtqueue_drain(vq, &last)) != NULL) { |
1983 |
if (!netmap_bufs) { |
2274 |
m_freem(txhdr->vth_mbuf); |
1984 |
m_freem(txhdr->vth_mbuf); |
2275 |
uma_zfree(vtnet_tx_header_zone, txhdr); |
1985 |
uma_zfree(vtnet_tx_header_zone, txhdr); |
|
|
1986 |
} |
1987 |
} |
2276 |
} |
1988 |
|
2277 |
|
1989 |
KASSERT(virtqueue_empty(vq), |
2278 |
KASSERT(virtqueue_empty(vq), |
Lines 1991-2002
Link Here
|
1991 |
} |
2280 |
} |
1992 |
|
2281 |
|
1993 |
/* |
2282 |
/* |
1994 |
* BMV: Much of this can go away once we finally have offsets in |
2283 |
* BMV: This can go away once we finally have offsets in the mbuf header. |
1995 |
* the mbuf packet header. Bug andre@. |
|
|
1996 |
*/ |
2284 |
*/ |
1997 |
static int |
2285 |
static int |
1998 |
vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m, |
2286 |
vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m, int *etype, |
1999 |
int *etype, int *proto, int *start) |
2287 |
int *proto, int *start) |
2000 |
{ |
2288 |
{ |
2001 |
struct vtnet_softc *sc; |
2289 |
struct vtnet_softc *sc; |
2002 |
struct ether_vlan_header *evh; |
2290 |
struct ether_vlan_header *evh; |
Lines 2040-2046
Link Here
|
2040 |
break; |
2328 |
break; |
2041 |
#endif |
2329 |
#endif |
2042 |
default: |
2330 |
default: |
2043 |
sc->vtnet_stats.tx_csum_bad_ethtype++; |
2331 |
sc->vtnet_stats.tx_csum_unknown_ethtype++; |
2044 |
return (EINVAL); |
2332 |
return (EINVAL); |
2045 |
} |
2333 |
} |
2046 |
|
2334 |
|
Lines 2048-2054
Link Here
|
2048 |
} |
2336 |
} |
2049 |
|
2337 |
|
2050 |
static int |
2338 |
static int |
2051 |
vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type, |
2339 |
vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int flags, |
2052 |
int offset, struct virtio_net_hdr *hdr) |
2340 |
int offset, struct virtio_net_hdr *hdr) |
2053 |
{ |
2341 |
{ |
2054 |
static struct timeval lastecn; |
2342 |
static struct timeval lastecn; |
Lines 2064-2079
Link Here
|
2064 |
} else |
2352 |
} else |
2065 |
tcp = (struct tcphdr *)(m->m_data + offset); |
2353 |
tcp = (struct tcphdr *)(m->m_data + offset); |
2066 |
|
2354 |
|
2067 |
hdr->hdr_len = offset + (tcp->th_off << 2); |
2355 |
hdr->hdr_len = vtnet_gtoh16(sc, offset + (tcp->th_off << 2)); |
2068 |
hdr->gso_size = m->m_pkthdr.tso_segsz; |
2356 |
hdr->gso_size = vtnet_gtoh16(sc, m->m_pkthdr.tso_segsz); |
2069 |
hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 : |
2357 |
hdr->gso_type = (flags & CSUM_IP_TSO) ? |
2070 |
VIRTIO_NET_HDR_GSO_TCPV6; |
2358 |
VIRTIO_NET_HDR_GSO_TCPV4 : VIRTIO_NET_HDR_GSO_TCPV6; |
2071 |
|
2359 |
|
2072 |
if (tcp->th_flags & TH_CWR) { |
2360 |
if (__predict_false(tcp->th_flags & TH_CWR)) { |
2073 |
/* |
2361 |
/* |
2074 |
* Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD, |
2362 |
* Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In |
2075 |
* ECN support is not on a per-interface basis, but globally via |
2363 |
* FreeBSD, ECN support is not on a per-interface basis, |
2076 |
* the net.inet.tcp.ecn.enable sysctl knob. The default is off. |
2364 |
* but globally via the net.inet.tcp.ecn.enable sysctl |
|
|
2365 |
* knob. The default is off. |
2077 |
*/ |
2366 |
*/ |
2078 |
if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) { |
2367 |
if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) { |
2079 |
if (ppsratecheck(&lastecn, &curecn, 1)) |
2368 |
if (ppsratecheck(&lastecn, &curecn, 1)) |
Lines 2103-2132
Link Here
|
2103 |
if (error) |
2392 |
if (error) |
2104 |
goto drop; |
2393 |
goto drop; |
2105 |
|
2394 |
|
2106 |
if ((etype == ETHERTYPE_IP && flags & VTNET_CSUM_OFFLOAD) || |
2395 |
if (flags & (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6)) { |
2107 |
(etype == ETHERTYPE_IPV6 && flags & VTNET_CSUM_OFFLOAD_IPV6)) { |
2396 |
/* Sanity check the parsed mbuf matches the offload flags. */ |
2108 |
/* |
2397 |
if (__predict_false((flags & VTNET_CSUM_OFFLOAD && |
2109 |
* We could compare the IP protocol vs the CSUM_ flag too, |
2398 |
etype != ETHERTYPE_IP) || (flags & VTNET_CSUM_OFFLOAD_IPV6 |
2110 |
* but that really should not be necessary. |
2399 |
&& etype != ETHERTYPE_IPV6))) { |
2111 |
*/ |
2400 |
sc->vtnet_stats.tx_csum_proto_mismatch++; |
|
|
2401 |
goto drop; |
2402 |
} |
2403 |
|
2112 |
hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; |
2404 |
hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; |
2113 |
hdr->csum_start = csum_start; |
2405 |
hdr->csum_start = vtnet_gtoh16(sc, csum_start); |
2114 |
hdr->csum_offset = m->m_pkthdr.csum_data; |
2406 |
hdr->csum_offset = vtnet_gtoh16(sc, m->m_pkthdr.csum_data); |
2115 |
txq->vtntx_stats.vtxs_csum++; |
2407 |
txq->vtntx_stats.vtxs_csum++; |
2116 |
} |
2408 |
} |
2117 |
|
2409 |
|
2118 |
if (flags & CSUM_TSO) { |
2410 |
if (flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) { |
|
|
2411 |
/* |
2412 |
* Sanity check the parsed mbuf IP protocol is TCP, and |
2413 |
* VirtIO TSO requires the checksum offloading above. |
2414 |
*/ |
2119 |
if (__predict_false(proto != IPPROTO_TCP)) { |
2415 |
if (__predict_false(proto != IPPROTO_TCP)) { |
2120 |
/* Likely failed to correctly parse the mbuf. */ |
|
|
2121 |
sc->vtnet_stats.tx_tso_not_tcp++; |
2416 |
sc->vtnet_stats.tx_tso_not_tcp++; |
2122 |
goto drop; |
2417 |
goto drop; |
|
|
2418 |
} else if (__predict_false((hdr->flags & |
2419 |
VIRTIO_NET_HDR_F_NEEDS_CSUM) == 0)) { |
2420 |
sc->vtnet_stats.tx_tso_without_csum++; |
2421 |
goto drop; |
2123 |
} |
2422 |
} |
2124 |
|
2423 |
|
2125 |
KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM, |
2424 |
error = vtnet_txq_offload_tso(txq, m, flags, csum_start, hdr); |
2126 |
("%s: mbuf %p TSO without checksum offload %#x", |
|
|
2127 |
__func__, m, flags)); |
2128 |
|
2129 |
error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr); |
2130 |
if (error) |
2425 |
if (error) |
2131 |
goto drop; |
2426 |
goto drop; |
2132 |
} |
2427 |
} |
Lines 2155-2162
Link Here
|
2155 |
|
2450 |
|
2156 |
sglist_reset(sg); |
2451 |
sglist_reset(sg); |
2157 |
error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size); |
2452 |
error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size); |
2158 |
KASSERT(error == 0 && sg->sg_nseg == 1, |
2453 |
if (error != 0 || sg->sg_nseg != 1) { |
2159 |
("%s: error %d adding header to sglist", __func__, error)); |
2454 |
KASSERT(0, ("%s: cannot add header to sglist error %d nseg %d", |
|
|
2455 |
__func__, error, sg->sg_nseg)); |
2456 |
goto fail; |
2457 |
} |
2160 |
|
2458 |
|
2161 |
error = sglist_append_mbuf(sg, m); |
2459 |
error = sglist_append_mbuf(sg, m); |
2162 |
if (error) { |
2460 |
if (error) { |
Lines 2186-2192
Link Here
|
2186 |
} |
2484 |
} |
2187 |
|
2485 |
|
2188 |
static int |
2486 |
static int |
2189 |
vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head) |
2487 |
vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head, int flags) |
2190 |
{ |
2488 |
{ |
2191 |
struct vtnet_tx_header *txhdr; |
2489 |
struct vtnet_tx_header *txhdr; |
2192 |
struct virtio_net_hdr *hdr; |
2490 |
struct virtio_net_hdr *hdr; |
Lines 2196-2202
Link Here
|
2196 |
m = *m_head; |
2494 |
m = *m_head; |
2197 |
M_ASSERTPKTHDR(m); |
2495 |
M_ASSERTPKTHDR(m); |
2198 |
|
2496 |
|
2199 |
txhdr = uma_zalloc(vtnet_tx_header_zone, M_NOWAIT | M_ZERO); |
2497 |
txhdr = uma_zalloc(vtnet_tx_header_zone, flags | M_ZERO); |
2200 |
if (txhdr == NULL) { |
2498 |
if (txhdr == NULL) { |
2201 |
m_freem(m); |
2499 |
m_freem(m); |
2202 |
*m_head = NULL; |
2500 |
*m_head = NULL; |
Lines 2204-2212
Link Here
|
2204 |
} |
2502 |
} |
2205 |
|
2503 |
|
2206 |
/* |
2504 |
/* |
2207 |
* Always use the non-mergeable header, regardless if the feature |
2505 |
* Always use the non-mergeable header, regardless if mergable headers |
2208 |
* was negotiated. For transmit, num_buffers is always zero. The |
2506 |
* were negotiated, because for transmit num_buffers is always zero. |
2209 |
* vtnet_hdr_size is used to enqueue the correct header size. |
2507 |
* The vtnet_hdr_size is used to enqueue the right header size segment. |
2210 |
*/ |
2508 |
*/ |
2211 |
hdr = &txhdr->vth_uhdr.hdr; |
2509 |
hdr = &txhdr->vth_uhdr.hdr; |
2212 |
|
2510 |
|
Lines 2228-2238
Link Here
|
2228 |
} |
2526 |
} |
2229 |
|
2527 |
|
2230 |
error = vtnet_txq_enqueue_buf(txq, m_head, txhdr); |
2528 |
error = vtnet_txq_enqueue_buf(txq, m_head, txhdr); |
2231 |
if (error == 0) |
|
|
2232 |
return (0); |
2233 |
|
2234 |
fail: |
2529 |
fail: |
2235 |
uma_zfree(vtnet_tx_header_zone, txhdr); |
2530 |
if (error) |
|
|
2531 |
uma_zfree(vtnet_tx_header_zone, txhdr); |
2236 |
|
2532 |
|
2237 |
return (error); |
2533 |
return (error); |
2238 |
} |
2534 |
} |
Lines 2270-2276
Link Here
|
2270 |
if (m0 == NULL) |
2566 |
if (m0 == NULL) |
2271 |
break; |
2567 |
break; |
2272 |
|
2568 |
|
2273 |
if (vtnet_txq_encap(txq, &m0) != 0) { |
2569 |
if (vtnet_txq_encap(txq, &m0, M_NOWAIT) != 0) { |
2274 |
if (m0 != NULL) |
2570 |
if (m0 != NULL) |
2275 |
IFQ_DRV_PREPEND(&ifp->if_snd, m0); |
2571 |
IFQ_DRV_PREPEND(&ifp->if_snd, m0); |
2276 |
break; |
2572 |
break; |
Lines 2347-2353
Link Here
|
2347 |
break; |
2643 |
break; |
2348 |
} |
2644 |
} |
2349 |
|
2645 |
|
2350 |
if (vtnet_txq_encap(txq, &m) != 0) { |
2646 |
if (vtnet_txq_encap(txq, &m, M_NOWAIT) != 0) { |
2351 |
if (m != NULL) |
2647 |
if (m != NULL) |
2352 |
drbr_putback(ifp, br, m); |
2648 |
drbr_putback(ifp, br, m); |
2353 |
else |
2649 |
else |
Lines 2381-2387
Link Here
|
2381 |
sc = ifp->if_softc; |
2677 |
sc = ifp->if_softc; |
2382 |
npairs = sc->vtnet_act_vq_pairs; |
2678 |
npairs = sc->vtnet_act_vq_pairs; |
2383 |
|
2679 |
|
2384 |
/* check if flowid is set */ |
|
|
2385 |
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) |
2680 |
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) |
2386 |
i = m->m_pkthdr.flowid % npairs; |
2681 |
i = m->m_pkthdr.flowid % npairs; |
2387 |
else |
2682 |
else |
Lines 2471-2476
Link Here
|
2471 |
deq = 0; |
2766 |
deq = 0; |
2472 |
VTNET_TXQ_LOCK_ASSERT(txq); |
2767 |
VTNET_TXQ_LOCK_ASSERT(txq); |
2473 |
|
2768 |
|
|
|
2769 |
#ifdef DEV_NETMAP |
2770 |
if (netmap_tx_irq(txq->vtntx_sc->vtnet_ifp, txq->vtntx_id)) { |
2771 |
virtqueue_disable_intr(vq); // XXX luigi |
2772 |
return (0); // XXX or 1 ? |
2773 |
} |
2774 |
#endif |
2775 |
|
2474 |
while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) { |
2776 |
while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) { |
2475 |
m = txhdr->vth_mbuf; |
2777 |
m = txhdr->vth_mbuf; |
2476 |
deq++; |
2778 |
deq++; |
Lines 2512-2522
Link Here
|
2512 |
return; |
2814 |
return; |
2513 |
} |
2815 |
} |
2514 |
|
2816 |
|
2515 |
#ifdef DEV_NETMAP |
|
|
2516 |
if (netmap_tx_irq(ifp, txq->vtntx_id) != NM_IRQ_PASS) |
2517 |
return; |
2518 |
#endif /* DEV_NETMAP */ |
2519 |
|
2520 |
VTNET_TXQ_LOCK(txq); |
2817 |
VTNET_TXQ_LOCK(txq); |
2521 |
|
2818 |
|
2522 |
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { |
2819 |
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { |
Lines 2703-2709
Link Here
|
2703 |
* Most drivers just ignore the return value - it only fails |
3000 |
* Most drivers just ignore the return value - it only fails |
2704 |
* with ENOMEM so an error is not likely. |
3001 |
* with ENOMEM so an error is not likely. |
2705 |
*/ |
3002 |
*/ |
2706 |
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { |
3003 |
for (i = 0; i < sc->vtnet_req_vq_pairs; i++) { |
2707 |
rxq = &sc->vtnet_rxqs[i]; |
3004 |
rxq = &sc->vtnet_rxqs[i]; |
2708 |
error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET, |
3005 |
error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET, |
2709 |
"%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id); |
3006 |
"%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id); |
Lines 2733-2739
Link Here
|
2733 |
rxq = &sc->vtnet_rxqs[i]; |
3030 |
rxq = &sc->vtnet_rxqs[i]; |
2734 |
if (rxq->vtnrx_tq != NULL) { |
3031 |
if (rxq->vtnrx_tq != NULL) { |
2735 |
taskqueue_free(rxq->vtnrx_tq); |
3032 |
taskqueue_free(rxq->vtnrx_tq); |
2736 |
rxq->vtnrx_tq = NULL; |
3033 |
rxq->vtnrx_vq = NULL; |
2737 |
} |
3034 |
} |
2738 |
|
3035 |
|
2739 |
txq = &sc->vtnet_txqs[i]; |
3036 |
txq = &sc->vtnet_txqs[i]; |
Lines 2773-2779
Link Here
|
2773 |
struct vtnet_txq *txq; |
3070 |
struct vtnet_txq *txq; |
2774 |
int i; |
3071 |
int i; |
2775 |
|
3072 |
|
2776 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { |
3073 |
#ifdef DEV_NETMAP |
|
|
3074 |
if (nm_native_on(NA(sc->vtnet_ifp))) |
3075 |
return; |
3076 |
#endif |
3077 |
|
3078 |
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { |
2777 |
rxq = &sc->vtnet_rxqs[i]; |
3079 |
rxq = &sc->vtnet_rxqs[i]; |
2778 |
vtnet_rxq_free_mbufs(rxq); |
3080 |
vtnet_rxq_free_mbufs(rxq); |
2779 |
|
3081 |
|
Lines 2789-2799
Link Here
|
2789 |
struct vtnet_txq *txq; |
3091 |
struct vtnet_txq *txq; |
2790 |
int i; |
3092 |
int i; |
2791 |
|
3093 |
|
|
|
3094 |
VTNET_CORE_LOCK_ASSERT(sc); |
3095 |
|
2792 |
/* |
3096 |
/* |
2793 |
* Lock and unlock the per-queue mutex so we known the stop |
3097 |
* Lock and unlock the per-queue mutex so we known the stop |
2794 |
* state is visible. Doing only the active queues should be |
3098 |
* state is visible. Doing only the active queues should be |
2795 |
* sufficient, but it does not cost much extra to do all the |
3099 |
* sufficient, but it does not cost much extra to do all the |
2796 |
* queues. Note we hold the core mutex here too. |
3100 |
* queues. |
2797 |
*/ |
3101 |
*/ |
2798 |
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { |
3102 |
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { |
2799 |
rxq = &sc->vtnet_rxqs[i]; |
3103 |
rxq = &sc->vtnet_rxqs[i]; |
Lines 2832-2839
Link Here
|
2832 |
virtio_stop(dev); |
3136 |
virtio_stop(dev); |
2833 |
vtnet_stop_rendezvous(sc); |
3137 |
vtnet_stop_rendezvous(sc); |
2834 |
|
3138 |
|
2835 |
/* Free any mbufs left in the virtqueues. */ |
|
|
2836 |
vtnet_drain_rxtx_queues(sc); |
3139 |
vtnet_drain_rxtx_queues(sc); |
|
|
3140 |
sc->vtnet_act_vq_pairs = 1; |
2837 |
} |
3141 |
} |
2838 |
|
3142 |
|
2839 |
static int |
3143 |
static int |
Lines 2842-2892
Link Here
|
2842 |
device_t dev; |
3146 |
device_t dev; |
2843 |
struct ifnet *ifp; |
3147 |
struct ifnet *ifp; |
2844 |
uint64_t features; |
3148 |
uint64_t features; |
2845 |
int mask, error; |
3149 |
int error; |
2846 |
|
3150 |
|
2847 |
dev = sc->vtnet_dev; |
3151 |
dev = sc->vtnet_dev; |
2848 |
ifp = sc->vtnet_ifp; |
3152 |
ifp = sc->vtnet_ifp; |
2849 |
features = sc->vtnet_features; |
3153 |
features = sc->vtnet_negotiated_features; |
2850 |
|
3154 |
|
2851 |
mask = 0; |
|
|
2852 |
#if defined(INET) |
2853 |
mask |= IFCAP_RXCSUM; |
2854 |
#endif |
2855 |
#if defined (INET6) |
2856 |
mask |= IFCAP_RXCSUM_IPV6; |
2857 |
#endif |
2858 |
|
2859 |
/* |
3155 |
/* |
2860 |
* Re-negotiate with the host, removing any disabled receive |
3156 |
* Re-negotiate with the host, removing any disabled receive |
2861 |
* features. Transmit features are disabled only on our side |
3157 |
* features. Transmit features are disabled only on our side |
2862 |
* via if_capenable and if_hwassist. |
3158 |
* via if_capenable and if_hwassist. |
2863 |
*/ |
3159 |
*/ |
2864 |
|
3160 |
|
2865 |
if (ifp->if_capabilities & mask) { |
3161 |
if ((ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) == 0) |
2866 |
/* |
3162 |
features &= ~(VIRTIO_NET_F_GUEST_CSUM | VTNET_LRO_FEATURES); |
2867 |
* We require both IPv4 and IPv6 offloading to be enabled |
|
|
2868 |
* in order to negotiated it: VirtIO does not distinguish |
2869 |
* between the two. |
2870 |
*/ |
2871 |
if ((ifp->if_capenable & mask) != mask) |
2872 |
features &= ~VIRTIO_NET_F_GUEST_CSUM; |
2873 |
} |
2874 |
|
3163 |
|
2875 |
if (ifp->if_capabilities & IFCAP_LRO) { |
3164 |
if ((ifp->if_capenable & IFCAP_LRO) == 0) |
2876 |
if ((ifp->if_capenable & IFCAP_LRO) == 0) |
3165 |
features &= ~VTNET_LRO_FEATURES; |
2877 |
features &= ~VTNET_LRO_FEATURES; |
|
|
2878 |
} |
2879 |
|
3166 |
|
2880 |
if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) { |
3167 |
if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) |
2881 |
if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) |
3168 |
features &= ~VIRTIO_NET_F_CTRL_VLAN; |
2882 |
features &= ~VIRTIO_NET_F_CTRL_VLAN; |
|
|
2883 |
} |
2884 |
|
3169 |
|
2885 |
error = virtio_reinit(dev, features); |
3170 |
error = virtio_reinit(dev, features); |
2886 |
if (error) |
3171 |
if (error) { |
2887 |
device_printf(dev, "virtio reinit error %d\n", error); |
3172 |
device_printf(dev, "virtio reinit error %d\n", error); |
|
|
3173 |
return (error); |
3174 |
} |
2888 |
|
3175 |
|
2889 |
return (error); |
3176 |
sc->vtnet_features = features; |
|
|
3177 |
virtio_reinit_complete(dev); |
3178 |
|
3179 |
return (0); |
2890 |
} |
3180 |
} |
2891 |
|
3181 |
|
2892 |
static void |
3182 |
static void |
Lines 2897-2905
Link Here
|
2897 |
ifp = sc->vtnet_ifp; |
3187 |
ifp = sc->vtnet_ifp; |
2898 |
|
3188 |
|
2899 |
if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { |
3189 |
if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { |
2900 |
/* Restore promiscuous and all-multicast modes. */ |
|
|
2901 |
vtnet_rx_filter(sc); |
3190 |
vtnet_rx_filter(sc); |
2902 |
/* Restore filtered MAC addresses. */ |
|
|
2903 |
vtnet_rx_filter_mac(sc); |
3191 |
vtnet_rx_filter_mac(sc); |
2904 |
} |
3192 |
} |
2905 |
|
3193 |
|
Lines 2911-2942
Link Here
|
2911 |
vtnet_init_rx_queues(struct vtnet_softc *sc) |
3199 |
vtnet_init_rx_queues(struct vtnet_softc *sc) |
2912 |
{ |
3200 |
{ |
2913 |
device_t dev; |
3201 |
device_t dev; |
|
|
3202 |
struct ifnet *ifp; |
2914 |
struct vtnet_rxq *rxq; |
3203 |
struct vtnet_rxq *rxq; |
2915 |
int i, clsize, error; |
3204 |
int i, clustersz, error; |
2916 |
|
3205 |
|
2917 |
dev = sc->vtnet_dev; |
3206 |
dev = sc->vtnet_dev; |
|
|
3207 |
ifp = sc->vtnet_ifp; |
2918 |
|
3208 |
|
2919 |
/* |
3209 |
clustersz = vtnet_rx_cluster_size(sc, ifp->if_mtu); |
2920 |
* Use the new cluster size if one has been set (via a MTU |
3210 |
sc->vtnet_rx_clustersz = clustersz; |
2921 |
* change). Otherwise, use the standard 2K clusters. |
3211 |
|
2922 |
* |
3212 |
if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) { |
2923 |
* BMV: It might make sense to use page sized clusters as |
3213 |
sc->vtnet_rx_nmbufs = howmany(sizeof(struct vtnet_rx_header) + |
2924 |
* the default (depending on the features negotiated). |
3214 |
VTNET_MAX_RX_SIZE, clustersz); |
2925 |
*/ |
3215 |
KASSERT(sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs, |
2926 |
if (sc->vtnet_rx_new_clsize != 0) { |
3216 |
("%s: too many rx mbufs %d for %d segments", __func__, |
2927 |
clsize = sc->vtnet_rx_new_clsize; |
3217 |
sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs)); |
2928 |
sc->vtnet_rx_new_clsize = 0; |
|
|
2929 |
} else |
3218 |
} else |
2930 |
clsize = MCLBYTES; |
3219 |
sc->vtnet_rx_nmbufs = 1; |
2931 |
|
3220 |
|
2932 |
sc->vtnet_rx_clsize = clsize; |
3221 |
#ifdef DEV_NETMAP |
2933 |
sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize); |
3222 |
if (vtnet_netmap_init_rx_buffers(sc)) |
|
|
3223 |
return (0); |
3224 |
#endif |
2934 |
|
3225 |
|
2935 |
KASSERT(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS || |
|
|
2936 |
sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs, |
2937 |
("%s: too many rx mbufs %d for %d segments", __func__, |
2938 |
sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs)); |
2939 |
|
2940 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { |
3226 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { |
2941 |
rxq = &sc->vtnet_rxqs[i]; |
3227 |
rxq = &sc->vtnet_rxqs[i]; |
2942 |
|
3228 |
|
Lines 2946-2953
Link Here
|
2946 |
VTNET_RXQ_UNLOCK(rxq); |
3232 |
VTNET_RXQ_UNLOCK(rxq); |
2947 |
|
3233 |
|
2948 |
if (error) { |
3234 |
if (error) { |
2949 |
device_printf(dev, |
3235 |
device_printf(dev, "cannot populate Rx queue %d\n", i); |
2950 |
"cannot allocate mbufs for Rx queue %d\n", i); |
|
|
2951 |
return (error); |
3236 |
return (error); |
2952 |
} |
3237 |
} |
2953 |
} |
3238 |
} |
Lines 2964-2969
Link Here
|
2964 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { |
3249 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { |
2965 |
txq = &sc->vtnet_txqs[i]; |
3250 |
txq = &sc->vtnet_txqs[i]; |
2966 |
txq->vtntx_watchdog = 0; |
3251 |
txq->vtntx_watchdog = 0; |
|
|
3252 |
txq->vtntx_intr_threshold = vtnet_txq_intr_threshold(txq); |
2967 |
} |
3253 |
} |
2968 |
|
3254 |
|
2969 |
return (0); |
3255 |
return (0); |
Lines 2993-3028
Link Here
|
2993 |
|
3279 |
|
2994 |
dev = sc->vtnet_dev; |
3280 |
dev = sc->vtnet_dev; |
2995 |
|
3281 |
|
2996 |
if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) { |
3282 |
if ((sc->vtnet_flags & VTNET_FLAG_MQ) == 0) { |
2997 |
sc->vtnet_act_vq_pairs = 1; |
3283 |
sc->vtnet_act_vq_pairs = 1; |
2998 |
return; |
3284 |
return; |
2999 |
} |
3285 |
} |
3000 |
|
3286 |
|
3001 |
npairs = sc->vtnet_requested_vq_pairs; |
3287 |
npairs = sc->vtnet_req_vq_pairs; |
3002 |
|
3288 |
|
3003 |
if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) { |
3289 |
if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) { |
3004 |
device_printf(dev, |
3290 |
device_printf(dev, "cannot set active queue pairs to %d, " |
3005 |
"cannot set active queue pairs to %d\n", npairs); |
3291 |
"falling back to 1 queue pair\n", npairs); |
3006 |
npairs = 1; |
3292 |
npairs = 1; |
3007 |
} |
3293 |
} |
3008 |
|
3294 |
|
3009 |
sc->vtnet_act_vq_pairs = npairs; |
3295 |
sc->vtnet_act_vq_pairs = npairs; |
3010 |
} |
3296 |
} |
3011 |
|
3297 |
|
|
|
3298 |
static void |
3299 |
vtnet_update_rx_offloads(struct vtnet_softc *sc) |
3300 |
{ |
3301 |
struct ifnet *ifp; |
3302 |
uint64_t features; |
3303 |
int error; |
3304 |
|
3305 |
ifp = sc->vtnet_ifp; |
3306 |
features = sc->vtnet_features; |
3307 |
|
3308 |
VTNET_CORE_LOCK_ASSERT(sc); |
3309 |
|
3310 |
if (ifp->if_capabilities & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) { |
3311 |
if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) |
3312 |
features |= VIRTIO_NET_F_GUEST_CSUM; |
3313 |
else |
3314 |
features &= ~VIRTIO_NET_F_GUEST_CSUM; |
3315 |
} |
3316 |
|
3317 |
if (ifp->if_capabilities & IFCAP_LRO && !vtnet_software_lro(sc)) { |
3318 |
if (ifp->if_capenable & IFCAP_LRO) |
3319 |
features |= VTNET_LRO_FEATURES; |
3320 |
else |
3321 |
features &= ~VTNET_LRO_FEATURES; |
3322 |
} |
3323 |
|
3324 |
error = vtnet_ctrl_guest_offloads(sc, |
3325 |
features & (VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4 | |
3326 |
VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN | |
3327 |
VIRTIO_NET_F_GUEST_UFO)); |
3328 |
if (error) { |
3329 |
device_printf(sc->vtnet_dev, |
3330 |
"%s: cannot update Rx features\n", __func__); |
3331 |
if (ifp->if_drv_flags & IFF_DRV_RUNNING) { |
3332 |
ifp->if_drv_flags &= ~IFF_DRV_RUNNING; |
3333 |
vtnet_init_locked(sc); |
3334 |
} |
3335 |
} else |
3336 |
sc->vtnet_features = features; |
3337 |
} |
3338 |
|
3012 |
static int |
3339 |
static int |
3013 |
vtnet_reinit(struct vtnet_softc *sc) |
3340 |
vtnet_reinit(struct vtnet_softc *sc) |
3014 |
{ |
3341 |
{ |
|
|
3342 |
device_t dev; |
3015 |
struct ifnet *ifp; |
3343 |
struct ifnet *ifp; |
3016 |
int error; |
3344 |
int error; |
3017 |
|
3345 |
|
|
|
3346 |
dev = sc->vtnet_dev; |
3018 |
ifp = sc->vtnet_ifp; |
3347 |
ifp = sc->vtnet_ifp; |
3019 |
|
3348 |
|
3020 |
/* Use the current MAC address. */ |
|
|
3021 |
bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN); |
3349 |
bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN); |
3022 |
vtnet_set_hwaddr(sc); |
|
|
3023 |
|
3350 |
|
|
|
3351 |
error = vtnet_virtio_reinit(sc); |
3352 |
if (error) |
3353 |
return (error); |
3354 |
|
3355 |
vtnet_set_macaddr(sc); |
3024 |
vtnet_set_active_vq_pairs(sc); |
3356 |
vtnet_set_active_vq_pairs(sc); |
3025 |
|
3357 |
|
|
|
3358 |
if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) |
3359 |
vtnet_init_rx_filters(sc); |
3360 |
|
3026 |
ifp->if_hwassist = 0; |
3361 |
ifp->if_hwassist = 0; |
3027 |
if (ifp->if_capenable & IFCAP_TXCSUM) |
3362 |
if (ifp->if_capenable & IFCAP_TXCSUM) |
3028 |
ifp->if_hwassist |= VTNET_CSUM_OFFLOAD; |
3363 |
ifp->if_hwassist |= VTNET_CSUM_OFFLOAD; |
Lines 3033-3048
Link Here
|
3033 |
if (ifp->if_capenable & IFCAP_TSO6) |
3368 |
if (ifp->if_capenable & IFCAP_TSO6) |
3034 |
ifp->if_hwassist |= CSUM_IP6_TSO; |
3369 |
ifp->if_hwassist |= CSUM_IP6_TSO; |
3035 |
|
3370 |
|
3036 |
if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) |
|
|
3037 |
vtnet_init_rx_filters(sc); |
3038 |
|
3039 |
error = vtnet_init_rxtx_queues(sc); |
3371 |
error = vtnet_init_rxtx_queues(sc); |
3040 |
if (error) |
3372 |
if (error) |
3041 |
return (error); |
3373 |
return (error); |
3042 |
|
3374 |
|
3043 |
vtnet_enable_interrupts(sc); |
|
|
3044 |
ifp->if_drv_flags |= IFF_DRV_RUNNING; |
3045 |
|
3046 |
return (0); |
3375 |
return (0); |
3047 |
} |
3376 |
} |
3048 |
|
3377 |
|
Lines 3062-3083
Link Here
|
3062 |
|
3391 |
|
3063 |
vtnet_stop(sc); |
3392 |
vtnet_stop(sc); |
3064 |
|
3393 |
|
3065 |
/* Reinitialize with the host. */ |
3394 |
if (vtnet_reinit(sc) != 0) { |
3066 |
if (vtnet_virtio_reinit(sc) != 0) |
3395 |
vtnet_stop(sc); |
3067 |
goto fail; |
3396 |
return; |
|
|
3397 |
} |
3068 |
|
3398 |
|
3069 |
if (vtnet_reinit(sc) != 0) |
3399 |
ifp->if_drv_flags |= IFF_DRV_RUNNING; |
3070 |
goto fail; |
|
|
3071 |
|
3072 |
virtio_reinit_complete(dev); |
3073 |
|
3074 |
vtnet_update_link_status(sc); |
3400 |
vtnet_update_link_status(sc); |
|
|
3401 |
vtnet_enable_interrupts(sc); |
3075 |
callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); |
3402 |
callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); |
3076 |
|
|
|
3077 |
return; |
3078 |
|
3079 |
fail: |
3080 |
vtnet_stop(sc); |
3081 |
} |
3403 |
} |
3082 |
|
3404 |
|
3083 |
static void |
3405 |
static void |
Lines 3087-3092
Link Here
|
3087 |
|
3409 |
|
3088 |
sc = xsc; |
3410 |
sc = xsc; |
3089 |
|
3411 |
|
|
|
3412 |
#ifdef DEV_NETMAP |
3413 |
if (!NA(sc->vtnet_ifp)) { |
3414 |
D("try to attach again"); |
3415 |
vtnet_netmap_attach(sc); |
3416 |
} |
3417 |
#endif |
3418 |
|
3090 |
VTNET_CORE_LOCK(sc); |
3419 |
VTNET_CORE_LOCK(sc); |
3091 |
vtnet_init_locked(sc); |
3420 |
vtnet_init_locked(sc); |
3092 |
VTNET_CORE_UNLOCK(sc); |
3421 |
VTNET_CORE_UNLOCK(sc); |
Lines 3095-3110
Link Here
|
3095 |
static void |
3424 |
static void |
3096 |
vtnet_free_ctrl_vq(struct vtnet_softc *sc) |
3425 |
vtnet_free_ctrl_vq(struct vtnet_softc *sc) |
3097 |
{ |
3426 |
{ |
3098 |
struct virtqueue *vq; |
|
|
3099 |
|
3427 |
|
3100 |
vq = sc->vtnet_ctrl_vq; |
|
|
3101 |
|
3102 |
/* |
3428 |
/* |
3103 |
* The control virtqueue is only polled and therefore it should |
3429 |
* The control virtqueue is only polled and therefore it should |
3104 |
* already be empty. |
3430 |
* already be empty. |
3105 |
*/ |
3431 |
*/ |
3106 |
KASSERT(virtqueue_empty(vq), |
3432 |
KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq), |
3107 |
("%s: ctrl vq %p not empty", __func__, vq)); |
3433 |
("%s: ctrl vq %p not empty", __func__, sc->vtnet_ctrl_vq)); |
3108 |
} |
3434 |
} |
3109 |
|
3435 |
|
3110 |
static void |
3436 |
static void |
Lines 3115-3161
Link Here
|
3115 |
|
3441 |
|
3116 |
vq = sc->vtnet_ctrl_vq; |
3442 |
vq = sc->vtnet_ctrl_vq; |
3117 |
|
3443 |
|
|
|
3444 |
MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ); |
3118 |
VTNET_CORE_LOCK_ASSERT(sc); |
3445 |
VTNET_CORE_LOCK_ASSERT(sc); |
3119 |
KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ, |
|
|
3120 |
("%s: CTRL_VQ feature not negotiated", __func__)); |
3121 |
|
3446 |
|
3122 |
if (!virtqueue_empty(vq)) |
3447 |
if (!virtqueue_empty(vq)) |
3123 |
return; |
3448 |
return; |
3124 |
if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0) |
|
|
3125 |
return; |
3126 |
|
3449 |
|
3127 |
/* |
3450 |
/* |
3128 |
* Poll for the response, but the command is likely already |
3451 |
* Poll for the response, but the command is likely completed before |
3129 |
* done when we return from the notify. |
3452 |
* returning from the notify. |
3130 |
*/ |
3453 |
*/ |
3131 |
virtqueue_notify(vq); |
3454 |
if (virtqueue_enqueue(vq, cookie, sg, readable, writable) == 0) { |
3132 |
virtqueue_poll(vq, NULL); |
3455 |
virtqueue_notify(vq); |
|
|
3456 |
virtqueue_poll(vq, NULL); |
3457 |
} |
3133 |
} |
3458 |
} |
3134 |
|
3459 |
|
3135 |
static int |
3460 |
static int |
3136 |
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr) |
3461 |
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr) |
3137 |
{ |
3462 |
{ |
3138 |
struct virtio_net_ctrl_hdr hdr __aligned(2); |
|
|
3139 |
struct sglist_seg segs[3]; |
3463 |
struct sglist_seg segs[3]; |
3140 |
struct sglist sg; |
3464 |
struct sglist sg; |
3141 |
uint8_t ack; |
3465 |
struct { |
|
|
3466 |
struct virtio_net_ctrl_hdr hdr __aligned(2); |
3467 |
uint8_t pad1; |
3468 |
uint8_t addr[ETHER_ADDR_LEN] __aligned(8); |
3469 |
uint8_t pad2; |
3470 |
uint8_t ack; |
3471 |
} s; |
3142 |
int error; |
3472 |
int error; |
3143 |
|
3473 |
|
3144 |
hdr.class = VIRTIO_NET_CTRL_MAC; |
3474 |
error = 0; |
3145 |
hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET; |
3475 |
MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_MAC); |
3146 |
ack = VIRTIO_NET_ERR; |
|
|
3147 |
|
3476 |
|
3148 |
sglist_init(&sg, 3, segs); |
3477 |
s.hdr.class = VIRTIO_NET_CTRL_MAC; |
|
|
3478 |
s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET; |
3479 |
bcopy(hwaddr, &s.addr[0], ETHER_ADDR_LEN); |
3480 |
s.ack = VIRTIO_NET_ERR; |
3481 |
|
3482 |
sglist_init(&sg, nitems(segs), segs); |
3483 |
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3484 |
error |= sglist_append(&sg, &s.addr[0], ETHER_ADDR_LEN); |
3485 |
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); |
3486 |
MPASS(error == 0 && sg.sg_nseg == nitems(segs)); |
3487 |
|
3488 |
if (error == 0) |
3489 |
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); |
3490 |
|
3491 |
return (s.ack == VIRTIO_NET_OK ? 0 : EIO); |
3492 |
} |
3493 |
|
3494 |
static int |
3495 |
vtnet_ctrl_guest_offloads(struct vtnet_softc *sc, uint64_t offloads) |
3496 |
{ |
3497 |
struct sglist_seg segs[3]; |
3498 |
struct sglist sg; |
3499 |
struct { |
3500 |
struct virtio_net_ctrl_hdr hdr __aligned(2); |
3501 |
uint8_t pad1; |
3502 |
uint64_t offloads __aligned(8); |
3503 |
uint8_t pad2; |
3504 |
uint8_t ack; |
3505 |
} s; |
3506 |
int error; |
3507 |
|
3149 |
error = 0; |
3508 |
error = 0; |
3150 |
error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3509 |
MPASS(sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS); |
3151 |
error |= sglist_append(&sg, hwaddr, ETHER_ADDR_LEN); |
|
|
3152 |
error |= sglist_append(&sg, &ack, sizeof(uint8_t)); |
3153 |
KASSERT(error == 0 && sg.sg_nseg == 3, |
3154 |
("%s: error %d adding set MAC msg to sglist", __func__, error)); |
3155 |
|
3510 |
|
3156 |
vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); |
3511 |
s.hdr.class = VIRTIO_NET_CTRL_GUEST_OFFLOADS; |
|
|
3512 |
s.hdr.cmd = VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET; |
3513 |
s.offloads = vtnet_gtoh64(sc, offloads); |
3514 |
s.ack = VIRTIO_NET_ERR; |
3157 |
|
3515 |
|
3158 |
return (ack == VIRTIO_NET_OK ? 0 : EIO); |
3516 |
sglist_init(&sg, nitems(segs), segs); |
|
|
3517 |
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3518 |
error |= sglist_append(&sg, &s.offloads, sizeof(uint64_t)); |
3519 |
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); |
3520 |
MPASS(error == 0 && sg.sg_nseg == nitems(segs)); |
3521 |
|
3522 |
if (error == 0) |
3523 |
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); |
3524 |
|
3525 |
return (s.ack == VIRTIO_NET_OK ? 0 : EIO); |
3159 |
} |
3526 |
} |
3160 |
|
3527 |
|
3161 |
static int |
3528 |
static int |
Lines 3164-3226
Link Here
|
3164 |
struct sglist_seg segs[3]; |
3531 |
struct sglist_seg segs[3]; |
3165 |
struct sglist sg; |
3532 |
struct sglist sg; |
3166 |
struct { |
3533 |
struct { |
3167 |
struct virtio_net_ctrl_hdr hdr; |
3534 |
struct virtio_net_ctrl_hdr hdr __aligned(2); |
3168 |
uint8_t pad1; |
3535 |
uint8_t pad1; |
3169 |
struct virtio_net_ctrl_mq mq; |
3536 |
struct virtio_net_ctrl_mq mq __aligned(2); |
3170 |
uint8_t pad2; |
3537 |
uint8_t pad2; |
3171 |
uint8_t ack; |
3538 |
uint8_t ack; |
3172 |
} s __aligned(2); |
3539 |
} s; |
3173 |
int error; |
3540 |
int error; |
3174 |
|
3541 |
|
|
|
3542 |
error = 0; |
3543 |
MPASS(sc->vtnet_flags & VTNET_FLAG_MQ); |
3544 |
|
3175 |
s.hdr.class = VIRTIO_NET_CTRL_MQ; |
3545 |
s.hdr.class = VIRTIO_NET_CTRL_MQ; |
3176 |
s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET; |
3546 |
s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET; |
3177 |
s.mq.virtqueue_pairs = npairs; |
3547 |
s.mq.virtqueue_pairs = vtnet_gtoh16(sc, npairs); |
3178 |
s.ack = VIRTIO_NET_ERR; |
3548 |
s.ack = VIRTIO_NET_ERR; |
3179 |
|
3549 |
|
3180 |
sglist_init(&sg, 3, segs); |
3550 |
sglist_init(&sg, nitems(segs), segs); |
3181 |
error = 0; |
|
|
3182 |
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3551 |
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3183 |
error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq)); |
3552 |
error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq)); |
3184 |
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); |
3553 |
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); |
3185 |
KASSERT(error == 0 && sg.sg_nseg == 3, |
3554 |
MPASS(error == 0 && sg.sg_nseg == nitems(segs)); |
3186 |
("%s: error %d adding MQ message to sglist", __func__, error)); |
|
|
3187 |
|
3555 |
|
3188 |
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); |
3556 |
if (error == 0) |
|
|
3557 |
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); |
3189 |
|
3558 |
|
3190 |
return (s.ack == VIRTIO_NET_OK ? 0 : EIO); |
3559 |
return (s.ack == VIRTIO_NET_OK ? 0 : EIO); |
3191 |
} |
3560 |
} |
3192 |
|
3561 |
|
3193 |
static int |
3562 |
static int |
3194 |
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on) |
3563 |
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, uint8_t cmd, int on) |
3195 |
{ |
3564 |
{ |
3196 |
struct sglist_seg segs[3]; |
3565 |
struct sglist_seg segs[3]; |
3197 |
struct sglist sg; |
3566 |
struct sglist sg; |
3198 |
struct { |
3567 |
struct { |
3199 |
struct virtio_net_ctrl_hdr hdr; |
3568 |
struct virtio_net_ctrl_hdr hdr __aligned(2); |
3200 |
uint8_t pad1; |
3569 |
uint8_t pad1; |
3201 |
uint8_t onoff; |
3570 |
uint8_t onoff; |
3202 |
uint8_t pad2; |
3571 |
uint8_t pad2; |
3203 |
uint8_t ack; |
3572 |
uint8_t ack; |
3204 |
} s __aligned(2); |
3573 |
} s; |
3205 |
int error; |
3574 |
int error; |
3206 |
|
3575 |
|
3207 |
KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, |
3576 |
error = 0; |
3208 |
("%s: CTRL_RX feature not negotiated", __func__)); |
3577 |
MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX); |
3209 |
|
3578 |
|
3210 |
s.hdr.class = VIRTIO_NET_CTRL_RX; |
3579 |
s.hdr.class = VIRTIO_NET_CTRL_RX; |
3211 |
s.hdr.cmd = cmd; |
3580 |
s.hdr.cmd = cmd; |
3212 |
s.onoff = !!on; |
3581 |
s.onoff = !!on; |
3213 |
s.ack = VIRTIO_NET_ERR; |
3582 |
s.ack = VIRTIO_NET_ERR; |
3214 |
|
3583 |
|
3215 |
sglist_init(&sg, 3, segs); |
3584 |
sglist_init(&sg, nitems(segs), segs); |
3216 |
error = 0; |
|
|
3217 |
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3585 |
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3218 |
error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t)); |
3586 |
error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t)); |
3219 |
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); |
3587 |
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); |
3220 |
KASSERT(error == 0 && sg.sg_nseg == 3, |
3588 |
MPASS(error == 0 && sg.sg_nseg == nitems(segs)); |
3221 |
("%s: error %d adding Rx message to sglist", __func__, error)); |
|
|
3222 |
|
3589 |
|
3223 |
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); |
3590 |
if (error == 0) |
|
|
3591 |
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); |
3224 |
|
3592 |
|
3225 |
return (s.ack == VIRTIO_NET_OK ? 0 : EIO); |
3593 |
return (s.ack == VIRTIO_NET_OK ? 0 : EIO); |
3226 |
} |
3594 |
} |
Lines 3228-3267
Link Here
|
3228 |
static int |
3596 |
static int |
3229 |
vtnet_set_promisc(struct vtnet_softc *sc, int on) |
3597 |
vtnet_set_promisc(struct vtnet_softc *sc, int on) |
3230 |
{ |
3598 |
{ |
3231 |
|
|
|
3232 |
return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on)); |
3599 |
return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on)); |
3233 |
} |
3600 |
} |
3234 |
|
3601 |
|
3235 |
static int |
3602 |
static int |
3236 |
vtnet_set_allmulti(struct vtnet_softc *sc, int on) |
3603 |
vtnet_set_allmulti(struct vtnet_softc *sc, int on) |
3237 |
{ |
3604 |
{ |
3238 |
|
|
|
3239 |
return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on)); |
3605 |
return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on)); |
3240 |
} |
3606 |
} |
3241 |
|
3607 |
|
3242 |
/* |
|
|
3243 |
* The device defaults to promiscuous mode for backwards compatibility. |
3244 |
* Turn it off at attach time if possible. |
3245 |
*/ |
3246 |
static void |
3608 |
static void |
3247 |
vtnet_attach_disable_promisc(struct vtnet_softc *sc) |
|
|
3248 |
{ |
3249 |
struct ifnet *ifp; |
3250 |
|
3251 |
ifp = sc->vtnet_ifp; |
3252 |
|
3253 |
VTNET_CORE_LOCK(sc); |
3254 |
if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) { |
3255 |
ifp->if_flags |= IFF_PROMISC; |
3256 |
} else if (vtnet_set_promisc(sc, 0) != 0) { |
3257 |
ifp->if_flags |= IFF_PROMISC; |
3258 |
device_printf(sc->vtnet_dev, |
3259 |
"cannot disable default promiscuous mode\n"); |
3260 |
} |
3261 |
VTNET_CORE_UNLOCK(sc); |
3262 |
} |
3263 |
|
3264 |
static void |
3265 |
vtnet_rx_filter(struct vtnet_softc *sc) |
3609 |
vtnet_rx_filter(struct vtnet_softc *sc) |
3266 |
{ |
3610 |
{ |
3267 |
device_t dev; |
3611 |
device_t dev; |
Lines 3272-3284
Link Here
|
3272 |
|
3616 |
|
3273 |
VTNET_CORE_LOCK_ASSERT(sc); |
3617 |
VTNET_CORE_LOCK_ASSERT(sc); |
3274 |
|
3618 |
|
3275 |
if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0) |
3619 |
if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0) { |
3276 |
device_printf(dev, "cannot %s promiscuous mode\n", |
3620 |
device_printf(dev, "cannot %s promiscuous mode\n", |
3277 |
ifp->if_flags & IFF_PROMISC ? "enable" : "disable"); |
3621 |
ifp->if_flags & IFF_PROMISC ? "enable" : "disable"); |
|
|
3622 |
} |
3278 |
|
3623 |
|
3279 |
if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0) |
3624 |
if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0) { |
3280 |
device_printf(dev, "cannot %s all-multicast mode\n", |
3625 |
device_printf(dev, "cannot %s all-multicast mode\n", |
3281 |
ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable"); |
3626 |
ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable"); |
|
|
3627 |
} |
3282 |
} |
3628 |
} |
3283 |
|
3629 |
|
3284 |
static void |
3630 |
static void |
Lines 3296-3309
Link Here
|
3296 |
|
3642 |
|
3297 |
ifp = sc->vtnet_ifp; |
3643 |
ifp = sc->vtnet_ifp; |
3298 |
filter = sc->vtnet_mac_filter; |
3644 |
filter = sc->vtnet_mac_filter; |
|
|
3645 |
|
3299 |
ucnt = 0; |
3646 |
ucnt = 0; |
3300 |
mcnt = 0; |
3647 |
mcnt = 0; |
3301 |
promisc = 0; |
3648 |
promisc = 0; |
3302 |
allmulti = 0; |
3649 |
allmulti = 0; |
|
|
3650 |
error = 0; |
3303 |
|
3651 |
|
|
|
3652 |
MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX); |
3304 |
VTNET_CORE_LOCK_ASSERT(sc); |
3653 |
VTNET_CORE_LOCK_ASSERT(sc); |
3305 |
KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, |
|
|
3306 |
("%s: CTRL_RX feature not negotiated", __func__)); |
3307 |
|
3654 |
|
3308 |
/* Unicast MAC addresses: */ |
3655 |
/* Unicast MAC addresses: */ |
3309 |
if_addr_rlock(ifp); |
3656 |
if_addr_rlock(ifp); |
Lines 3324-3337
Link Here
|
3324 |
} |
3671 |
} |
3325 |
if_addr_runlock(ifp); |
3672 |
if_addr_runlock(ifp); |
3326 |
|
3673 |
|
3327 |
if (promisc != 0) { |
|
|
3328 |
filter->vmf_unicast.nentries = 0; |
3329 |
if_printf(ifp, "more than %d MAC addresses assigned, " |
3330 |
"falling back to promiscuous mode\n", |
3331 |
VTNET_MAX_MAC_ENTRIES); |
3332 |
} else |
3333 |
filter->vmf_unicast.nentries = ucnt; |
3334 |
|
3335 |
/* Multicast MAC addresses: */ |
3674 |
/* Multicast MAC addresses: */ |
3336 |
if_maddr_rlock(ifp); |
3675 |
if_maddr_rlock(ifp); |
3337 |
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { |
3676 |
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { |
Lines 3348-3381
Link Here
|
3348 |
} |
3687 |
} |
3349 |
if_maddr_runlock(ifp); |
3688 |
if_maddr_runlock(ifp); |
3350 |
|
3689 |
|
|
|
3690 |
if (promisc != 0) { |
3691 |
if_printf(ifp, "cannot filter more than %d MAC addresses, " |
3692 |
"falling back to promiscuous mode\n", |
3693 |
VTNET_MAX_MAC_ENTRIES); |
3694 |
ucnt = 0; |
3695 |
} |
3351 |
if (allmulti != 0) { |
3696 |
if (allmulti != 0) { |
3352 |
filter->vmf_multicast.nentries = 0; |
3697 |
if_printf(ifp, "cannot filter more than %d multicast MAC " |
3353 |
if_printf(ifp, "more than %d multicast MAC addresses " |
3698 |
"addresses, falling back to all-multicast mode\n", |
3354 |
"assigned, falling back to all-multicast mode\n", |
|
|
3355 |
VTNET_MAX_MAC_ENTRIES); |
3699 |
VTNET_MAX_MAC_ENTRIES); |
3356 |
} else |
3700 |
mcnt = 0; |
3357 |
filter->vmf_multicast.nentries = mcnt; |
3701 |
} |
3358 |
|
3702 |
|
3359 |
if (promisc != 0 && allmulti != 0) |
3703 |
if (promisc != 0 && allmulti != 0) |
3360 |
goto out; |
3704 |
goto out; |
3361 |
|
3705 |
|
|
|
3706 |
filter->vmf_unicast.nentries = vtnet_gtoh32(sc, ucnt); |
3707 |
filter->vmf_multicast.nentries = vtnet_gtoh32(sc, mcnt); |
3708 |
|
3362 |
hdr.class = VIRTIO_NET_CTRL_MAC; |
3709 |
hdr.class = VIRTIO_NET_CTRL_MAC; |
3363 |
hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET; |
3710 |
hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET; |
3364 |
ack = VIRTIO_NET_ERR; |
3711 |
ack = VIRTIO_NET_ERR; |
3365 |
|
3712 |
|
3366 |
sglist_init(&sg, 4, segs); |
3713 |
sglist_init(&sg, nitems(segs), segs); |
3367 |
error = 0; |
|
|
3368 |
error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3714 |
error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3369 |
error |= sglist_append(&sg, &filter->vmf_unicast, |
3715 |
error |= sglist_append(&sg, &filter->vmf_unicast, |
3370 |
sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN); |
3716 |
sizeof(uint32_t) + ucnt * ETHER_ADDR_LEN); |
3371 |
error |= sglist_append(&sg, &filter->vmf_multicast, |
3717 |
error |= sglist_append(&sg, &filter->vmf_multicast, |
3372 |
sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN); |
3718 |
sizeof(uint32_t) + mcnt * ETHER_ADDR_LEN); |
3373 |
error |= sglist_append(&sg, &ack, sizeof(uint8_t)); |
3719 |
error |= sglist_append(&sg, &ack, sizeof(uint8_t)); |
3374 |
KASSERT(error == 0 && sg.sg_nseg == 4, |
3720 |
MPASS(error == 0 && sg.sg_nseg == nitems(segs)); |
3375 |
("%s: error %d adding MAC filter msg to sglist", __func__, error)); |
|
|
3376 |
|
3721 |
|
3377 |
vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); |
3722 |
if (error == 0) |
3378 |
|
3723 |
vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); |
3379 |
if (ack != VIRTIO_NET_OK) |
3724 |
if (ack != VIRTIO_NET_OK) |
3380 |
if_printf(ifp, "error setting host MAC filter table\n"); |
3725 |
if_printf(ifp, "error setting host MAC filter table\n"); |
3381 |
|
3726 |
|
Lines 3392-3419
Link Here
|
3392 |
struct sglist_seg segs[3]; |
3737 |
struct sglist_seg segs[3]; |
3393 |
struct sglist sg; |
3738 |
struct sglist sg; |
3394 |
struct { |
3739 |
struct { |
3395 |
struct virtio_net_ctrl_hdr hdr; |
3740 |
struct virtio_net_ctrl_hdr hdr __aligned(2); |
3396 |
uint8_t pad1; |
3741 |
uint8_t pad1; |
3397 |
uint16_t tag; |
3742 |
uint16_t tag __aligned(2); |
3398 |
uint8_t pad2; |
3743 |
uint8_t pad2; |
3399 |
uint8_t ack; |
3744 |
uint8_t ack; |
3400 |
} s __aligned(2); |
3745 |
} s; |
3401 |
int error; |
3746 |
int error; |
3402 |
|
3747 |
|
|
|
3748 |
error = 0; |
3749 |
MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER); |
3750 |
|
3403 |
s.hdr.class = VIRTIO_NET_CTRL_VLAN; |
3751 |
s.hdr.class = VIRTIO_NET_CTRL_VLAN; |
3404 |
s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL; |
3752 |
s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL; |
3405 |
s.tag = tag; |
3753 |
s.tag = vtnet_gtoh16(sc, tag); |
3406 |
s.ack = VIRTIO_NET_ERR; |
3754 |
s.ack = VIRTIO_NET_ERR; |
3407 |
|
3755 |
|
3408 |
sglist_init(&sg, 3, segs); |
3756 |
sglist_init(&sg, nitems(segs), segs); |
3409 |
error = 0; |
|
|
3410 |
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3757 |
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3411 |
error |= sglist_append(&sg, &s.tag, sizeof(uint16_t)); |
3758 |
error |= sglist_append(&sg, &s.tag, sizeof(uint16_t)); |
3412 |
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); |
3759 |
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); |
3413 |
KASSERT(error == 0 && sg.sg_nseg == 3, |
3760 |
MPASS(error == 0 && sg.sg_nseg == nitems(segs)); |
3414 |
("%s: error %d adding VLAN message to sglist", __func__, error)); |
|
|
3415 |
|
3761 |
|
3416 |
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); |
3762 |
if (error == 0) |
|
|
3763 |
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); |
3417 |
|
3764 |
|
3418 |
return (s.ack == VIRTIO_NET_OK ? 0 : EIO); |
3765 |
return (s.ack == VIRTIO_NET_OK ? 0 : EIO); |
3419 |
} |
3766 |
} |
Lines 3421-3433
Link Here
|
3421 |
static void |
3768 |
static void |
3422 |
vtnet_rx_filter_vlan(struct vtnet_softc *sc) |
3769 |
vtnet_rx_filter_vlan(struct vtnet_softc *sc) |
3423 |
{ |
3770 |
{ |
|
|
3771 |
int i, bit; |
3424 |
uint32_t w; |
3772 |
uint32_t w; |
3425 |
uint16_t tag; |
3773 |
uint16_t tag; |
3426 |
int i, bit; |
|
|
3427 |
|
3774 |
|
|
|
3775 |
MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER); |
3428 |
VTNET_CORE_LOCK_ASSERT(sc); |
3776 |
VTNET_CORE_LOCK_ASSERT(sc); |
3429 |
KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER, |
|
|
3430 |
("%s: VLAN_FILTER feature not negotiated", __func__)); |
3431 |
|
3777 |
|
3432 |
/* Enable the filter for each configured VLAN. */ |
3778 |
/* Enable the filter for each configured VLAN. */ |
3433 |
for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) { |
3779 |
for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) { |
Lines 3466-3471
Link Here
|
3466 |
sc->vtnet_vlan_filter[idx] &= ~(1 << bit); |
3812 |
sc->vtnet_vlan_filter[idx] &= ~(1 << bit); |
3467 |
|
3813 |
|
3468 |
if (ifp->if_capenable & IFCAP_VLAN_HWFILTER && |
3814 |
if (ifp->if_capenable & IFCAP_VLAN_HWFILTER && |
|
|
3815 |
ifp->if_drv_flags & IFF_DRV_RUNNING && |
3469 |
vtnet_exec_vlan_filter(sc, add, tag) != 0) { |
3816 |
vtnet_exec_vlan_filter(sc, add, tag) != 0) { |
3470 |
device_printf(sc->vtnet_dev, |
3817 |
device_printf(sc->vtnet_dev, |
3471 |
"cannot %s VLAN %d %s the host filter table\n", |
3818 |
"cannot %s VLAN %d %s the host filter table\n", |
Lines 3495-3515
Link Here
|
3495 |
vtnet_update_vlan_filter(arg, 0, tag); |
3842 |
vtnet_update_vlan_filter(arg, 0, tag); |
3496 |
} |
3843 |
} |
3497 |
|
3844 |
|
|
|
3845 |
static void |
3846 |
vtnet_update_speed_duplex(struct vtnet_softc *sc) |
3847 |
{ |
3848 |
struct ifnet *ifp; |
3849 |
uint32_t speed; |
3850 |
|
3851 |
ifp = sc->vtnet_ifp; |
3852 |
|
3853 |
if ((sc->vtnet_features & VIRTIO_NET_F_SPEED_DUPLEX) == 0) |
3854 |
return; |
3855 |
|
3856 |
/* BMV: Ignore duplex. */ |
3857 |
speed = virtio_read_dev_config_4(sc->vtnet_dev, |
3858 |
offsetof(struct virtio_net_config, speed)); |
3859 |
if (speed != -1) |
3860 |
ifp->if_baudrate = IF_Mbps(speed); |
3861 |
} |
3862 |
|
3498 |
static int |
3863 |
static int |
3499 |
vtnet_is_link_up(struct vtnet_softc *sc) |
3864 |
vtnet_is_link_up(struct vtnet_softc *sc) |
3500 |
{ |
3865 |
{ |
3501 |
device_t dev; |
|
|
3502 |
struct ifnet *ifp; |
3503 |
uint16_t status; |
3866 |
uint16_t status; |
3504 |
|
3867 |
|
3505 |
dev = sc->vtnet_dev; |
3868 |
if ((sc->vtnet_features & VIRTIO_NET_F_STATUS) == 0) |
3506 |
ifp = sc->vtnet_ifp; |
3869 |
return (1); |
3507 |
|
3870 |
|
3508 |
if ((ifp->if_capabilities & IFCAP_LINKSTATE) == 0) |
3871 |
status = virtio_read_dev_config_2(sc->vtnet_dev, |
3509 |
status = VIRTIO_NET_S_LINK_UP; |
3872 |
offsetof(struct virtio_net_config, status)); |
3510 |
else |
|
|
3511 |
status = virtio_read_dev_config_2(dev, |
3512 |
offsetof(struct virtio_net_config, status)); |
3513 |
|
3873 |
|
3514 |
return ((status & VIRTIO_NET_S_LINK_UP) != 0); |
3874 |
return ((status & VIRTIO_NET_S_LINK_UP) != 0); |
3515 |
} |
3875 |
} |
Lines 3521-3532
Link Here
|
3521 |
int link; |
3881 |
int link; |
3522 |
|
3882 |
|
3523 |
ifp = sc->vtnet_ifp; |
3883 |
ifp = sc->vtnet_ifp; |
3524 |
|
|
|
3525 |
VTNET_CORE_LOCK_ASSERT(sc); |
3884 |
VTNET_CORE_LOCK_ASSERT(sc); |
3526 |
link = vtnet_is_link_up(sc); |
3885 |
link = vtnet_is_link_up(sc); |
3527 |
|
3886 |
|
3528 |
/* Notify if the link status has changed. */ |
3887 |
/* Notify if the link status has changed. */ |
3529 |
if (link != 0 && sc->vtnet_link_active == 0) { |
3888 |
if (link != 0 && sc->vtnet_link_active == 0) { |
|
|
3889 |
vtnet_update_speed_duplex(sc); |
3530 |
sc->vtnet_link_active = 1; |
3890 |
sc->vtnet_link_active = 1; |
3531 |
if_link_state_change(ifp, LINK_STATE_UP); |
3891 |
if_link_state_change(ifp, LINK_STATE_UP); |
3532 |
} else if (link == 0 && sc->vtnet_link_active != 0) { |
3892 |
} else if (link == 0 && sc->vtnet_link_active != 0) { |
Lines 3538-3553
Link Here
|
3538 |
static int |
3898 |
static int |
3539 |
vtnet_ifmedia_upd(struct ifnet *ifp) |
3899 |
vtnet_ifmedia_upd(struct ifnet *ifp) |
3540 |
{ |
3900 |
{ |
3541 |
struct vtnet_softc *sc; |
3901 |
return (EOPNOTSUPP); |
3542 |
struct ifmedia *ifm; |
|
|
3543 |
|
3544 |
sc = ifp->if_softc; |
3545 |
ifm = &sc->vtnet_media; |
3546 |
|
3547 |
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) |
3548 |
return (EINVAL); |
3549 |
|
3550 |
return (0); |
3551 |
} |
3902 |
} |
3552 |
|
3903 |
|
3553 |
static void |
3904 |
static void |
Lines 3563-3587
Link Here
|
3563 |
VTNET_CORE_LOCK(sc); |
3914 |
VTNET_CORE_LOCK(sc); |
3564 |
if (vtnet_is_link_up(sc) != 0) { |
3915 |
if (vtnet_is_link_up(sc) != 0) { |
3565 |
ifmr->ifm_status |= IFM_ACTIVE; |
3916 |
ifmr->ifm_status |= IFM_ACTIVE; |
3566 |
ifmr->ifm_active |= VTNET_MEDIATYPE; |
3917 |
ifmr->ifm_active |= IFM_10G_T | IFM_FDX; |
3567 |
} else |
3918 |
} else |
3568 |
ifmr->ifm_active |= IFM_NONE; |
3919 |
ifmr->ifm_active |= IFM_NONE; |
3569 |
VTNET_CORE_UNLOCK(sc); |
3920 |
VTNET_CORE_UNLOCK(sc); |
3570 |
} |
3921 |
} |
3571 |
|
3922 |
|
3572 |
static void |
3923 |
static void |
3573 |
vtnet_set_hwaddr(struct vtnet_softc *sc) |
3924 |
vtnet_get_macaddr(struct vtnet_softc *sc) |
3574 |
{ |
3925 |
{ |
|
|
3926 |
|
3927 |
if (sc->vtnet_flags & VTNET_FLAG_MAC) { |
3928 |
virtio_read_device_config_array(sc->vtnet_dev, |
3929 |
offsetof(struct virtio_net_config, mac), |
3930 |
&sc->vtnet_hwaddr[0], sizeof(uint8_t), ETHER_ADDR_LEN); |
3931 |
} else { |
3932 |
/* Generate a random locally administered unicast address. */ |
3933 |
sc->vtnet_hwaddr[0] = 0xB2; |
3934 |
arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0); |
3935 |
} |
3936 |
} |
3937 |
|
3938 |
static void |
3939 |
vtnet_set_macaddr(struct vtnet_softc *sc) |
3940 |
{ |
3575 |
device_t dev; |
3941 |
device_t dev; |
3576 |
int i; |
3942 |
int error; |
3577 |
|
3943 |
|
3578 |
dev = sc->vtnet_dev; |
3944 |
dev = sc->vtnet_dev; |
3579 |
|
3945 |
|
3580 |
if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) { |
3946 |
if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) { |
3581 |
if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0) |
3947 |
error = vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr); |
|
|
3948 |
if (error) |
3582 |
device_printf(dev, "unable to set MAC address\n"); |
3949 |
device_printf(dev, "unable to set MAC address\n"); |
3583 |
} else if (sc->vtnet_flags & VTNET_FLAG_MAC) { |
3950 |
return; |
3584 |
for (i = 0; i < ETHER_ADDR_LEN; i++) { |
3951 |
} |
|
|
3952 |
|
3953 |
/* MAC in config is read-only in modern VirtIO. */ |
3954 |
if (!vtnet_modern(sc) && sc->vtnet_flags & VTNET_FLAG_MAC) { |
3955 |
for (int i = 0; i < ETHER_ADDR_LEN; i++) { |
3585 |
virtio_write_dev_config_1(dev, |
3956 |
virtio_write_dev_config_1(dev, |
3586 |
offsetof(struct virtio_net_config, mac) + i, |
3957 |
offsetof(struct virtio_net_config, mac) + i, |
3587 |
sc->vtnet_hwaddr[i]); |
3958 |
sc->vtnet_hwaddr[i]); |
Lines 3590-3620
Link Here
|
3590 |
} |
3961 |
} |
3591 |
|
3962 |
|
3592 |
static void |
3963 |
static void |
3593 |
vtnet_get_hwaddr(struct vtnet_softc *sc) |
3964 |
vtnet_attached_set_macaddr(struct vtnet_softc *sc) |
3594 |
{ |
3965 |
{ |
3595 |
device_t dev; |
|
|
3596 |
int i; |
3597 |
|
3966 |
|
3598 |
dev = sc->vtnet_dev; |
3967 |
/* Assign MAC address if it was generated. */ |
3599 |
|
3968 |
if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) |
3600 |
if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) { |
3969 |
vtnet_set_macaddr(sc); |
3601 |
/* |
|
|
3602 |
* Generate a random locally administered unicast address. |
3603 |
* |
3604 |
* It would be nice to generate the same MAC address across |
3605 |
* reboots, but it seems all the hosts currently available |
3606 |
* support the MAC feature, so this isn't too important. |
3607 |
*/ |
3608 |
sc->vtnet_hwaddr[0] = 0xB2; |
3609 |
arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0); |
3610 |
vtnet_set_hwaddr(sc); |
3611 |
return; |
3612 |
} |
3613 |
|
3614 |
for (i = 0; i < ETHER_ADDR_LEN; i++) { |
3615 |
sc->vtnet_hwaddr[i] = virtio_read_dev_config_1(dev, |
3616 |
offsetof(struct virtio_net_config, mac) + i); |
3617 |
} |
3618 |
} |
3970 |
} |
3619 |
|
3971 |
|
3620 |
static void |
3972 |
static void |
Lines 3645-3680
Link Here
|
3645 |
} |
3997 |
} |
3646 |
|
3998 |
|
3647 |
static void |
3999 |
static void |
3648 |
vtnet_set_tx_intr_threshold(struct vtnet_softc *sc) |
|
|
3649 |
{ |
3650 |
int size, thresh; |
3651 |
|
3652 |
size = virtqueue_size(sc->vtnet_txqs[0].vtntx_vq); |
3653 |
|
3654 |
/* |
3655 |
* The Tx interrupt is disabled until the queue free count falls |
3656 |
* below our threshold. Completed frames are drained from the Tx |
3657 |
* virtqueue before transmitting new frames and in the watchdog |
3658 |
* callout, so the frequency of Tx interrupts is greatly reduced, |
3659 |
* at the cost of not freeing mbufs as quickly as they otherwise |
3660 |
* would be. |
3661 |
* |
3662 |
* N.B. We assume all the Tx queues are the same size. |
3663 |
*/ |
3664 |
thresh = size / 4; |
3665 |
|
3666 |
/* |
3667 |
* Without indirect descriptors, leave enough room for the most |
3668 |
* segments we handle. |
3669 |
*/ |
3670 |
if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 && |
3671 |
thresh < sc->vtnet_tx_nsegs) |
3672 |
thresh = sc->vtnet_tx_nsegs; |
3673 |
|
3674 |
sc->vtnet_tx_intr_thresh = thresh; |
3675 |
} |
3676 |
|
3677 |
static void |
3678 |
vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx, |
4000 |
vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx, |
3679 |
struct sysctl_oid_list *child, struct vtnet_rxq *rxq) |
4001 |
struct sysctl_oid_list *child, struct vtnet_rxq *rxq) |
3680 |
{ |
4002 |
{ |
Lines 3702-3707
Link Here
|
3702 |
&stats->vrxs_csum, "Receive checksum offloaded"); |
4024 |
&stats->vrxs_csum, "Receive checksum offloaded"); |
3703 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD, |
4025 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD, |
3704 |
&stats->vrxs_csum_failed, "Receive checksum offload failed"); |
4026 |
&stats->vrxs_csum_failed, "Receive checksum offload failed"); |
|
|
4027 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro", CTLFLAG_RD, |
4028 |
&stats->vrxs_host_lro, "Receive host segmentation offloaded"); |
3705 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD, |
4029 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD, |
3706 |
&stats->vrxs_rescheduled, |
4030 |
&stats->vrxs_rescheduled, |
3707 |
"Receive interrupt handler rescheduled"); |
4031 |
"Receive interrupt handler rescheduled"); |
Lines 3732-3738
Link Here
|
3732 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD, |
4056 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD, |
3733 |
&stats->vtxs_csum, "Transmit checksum offloaded"); |
4057 |
&stats->vtxs_csum, "Transmit checksum offloaded"); |
3734 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD, |
4058 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD, |
3735 |
&stats->vtxs_tso, "Transmit segmentation offloaded"); |
4059 |
&stats->vtxs_tso, "Transmit TCP segmentation offloaded"); |
3736 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD, |
4060 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD, |
3737 |
&stats->vtxs_rescheduled, |
4061 |
&stats->vtxs_rescheduled, |
3738 |
"Transmit interrupt handler rescheduled"); |
4062 |
"Transmit interrupt handler rescheduled"); |
Lines 3752-3758
Link Here
|
3752 |
tree = device_get_sysctl_tree(dev); |
4076 |
tree = device_get_sysctl_tree(dev); |
3753 |
child = SYSCTL_CHILDREN(tree); |
4077 |
child = SYSCTL_CHILDREN(tree); |
3754 |
|
4078 |
|
3755 |
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { |
4079 |
for (i = 0; i < sc->vtnet_req_vq_pairs; i++) { |
3756 |
vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]); |
4080 |
vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]); |
3757 |
vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]); |
4081 |
vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]); |
3758 |
} |
4082 |
} |
Lines 3812-3827
Link Here
|
3812 |
CTLFLAG_RD, &stats->rx_task_rescheduled, |
4136 |
CTLFLAG_RD, &stats->rx_task_rescheduled, |
3813 |
"Times the receive interrupt task rescheduled itself"); |
4137 |
"Times the receive interrupt task rescheduled itself"); |
3814 |
|
4138 |
|
3815 |
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype", |
4139 |
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_unknown_ethtype", |
3816 |
CTLFLAG_RD, &stats->tx_csum_bad_ethtype, |
4140 |
CTLFLAG_RD, &stats->tx_csum_unknown_ethtype, |
3817 |
"Aborted transmit of checksum offloaded buffer with unknown " |
4141 |
"Aborted transmit of checksum offloaded buffer with unknown " |
3818 |
"Ethernet type"); |
4142 |
"Ethernet type"); |
3819 |
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype", |
4143 |
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_proto_mismatch", |
3820 |
CTLFLAG_RD, &stats->tx_tso_bad_ethtype, |
4144 |
CTLFLAG_RD, &stats->tx_csum_proto_mismatch, |
3821 |
"Aborted transmit of TSO buffer with unknown Ethernet type"); |
4145 |
"Aborted transmit of checksum offloaded buffer because mismatched " |
|
|
4146 |
"protocols"); |
3822 |
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp", |
4147 |
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp", |
3823 |
CTLFLAG_RD, &stats->tx_tso_not_tcp, |
4148 |
CTLFLAG_RD, &stats->tx_tso_not_tcp, |
3824 |
"Aborted transmit of TSO buffer with non TCP protocol"); |
4149 |
"Aborted transmit of TSO buffer with non TCP protocol"); |
|
|
4150 |
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_without_csum", |
4151 |
CTLFLAG_RD, &stats->tx_tso_without_csum, |
4152 |
"Aborted transmit of TSO buffer without TCP checksum offload"); |
3825 |
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged", |
4153 |
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged", |
3826 |
CTLFLAG_RD, &stats->tx_defragged, |
4154 |
CTLFLAG_RD, &stats->tx_defragged, |
3827 |
"Transmit mbufs defragged"); |
4155 |
"Transmit mbufs defragged"); |
Lines 3854-3863
Link Here
|
3854 |
|
4182 |
|
3855 |
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs", |
4183 |
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs", |
3856 |
CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0, |
4184 |
CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0, |
3857 |
"Maximum number of supported virtqueue pairs"); |
4185 |
"Number of maximum supported virtqueue pairs"); |
3858 |
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "requested_vq_pairs", |
4186 |
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "req_vq_pairs", |
3859 |
CTLFLAG_RD, &sc->vtnet_requested_vq_pairs, 0, |
4187 |
CTLFLAG_RD, &sc->vtnet_req_vq_pairs, 0, |
3860 |
"Requested number of virtqueue pairs"); |
4188 |
"Number of requested virtqueue pairs"); |
3861 |
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs", |
4189 |
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs", |
3862 |
CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0, |
4190 |
CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0, |
3863 |
"Number of active virtqueue pairs"); |
4191 |
"Number of active virtqueue pairs"); |
Lines 3865-3870
Link Here
|
3865 |
vtnet_setup_stat_sysctl(ctx, child, sc); |
4193 |
vtnet_setup_stat_sysctl(ctx, child, sc); |
3866 |
} |
4194 |
} |
3867 |
|
4195 |
|
|
|
4196 |
static void |
4197 |
vtnet_load_tunables(struct vtnet_softc *sc) |
4198 |
{ |
4199 |
|
4200 |
sc->vtnet_lro_entry_count = vtnet_tunable_int(sc, |
4201 |
"lro_entry_count", vtnet_lro_entry_count); |
4202 |
if (sc->vtnet_lro_entry_count < TCP_LRO_ENTRIES) |
4203 |
sc->vtnet_lro_entry_count = TCP_LRO_ENTRIES; |
4204 |
|
4205 |
sc->vtnet_lro_mbufq_depth = vtnet_tunable_int(sc, |
4206 |
"lro_mbufq_depeth", vtnet_lro_mbufq_depth); |
4207 |
} |
4208 |
|
3868 |
static int |
4209 |
static int |
3869 |
vtnet_rxq_enable_intr(struct vtnet_rxq *rxq) |
4210 |
vtnet_rxq_enable_intr(struct vtnet_rxq *rxq) |
3870 |
{ |
4211 |
{ |
Lines 3906-3915
Link Here
|
3906 |
static void |
4247 |
static void |
3907 |
vtnet_enable_rx_interrupts(struct vtnet_softc *sc) |
4248 |
vtnet_enable_rx_interrupts(struct vtnet_softc *sc) |
3908 |
{ |
4249 |
{ |
|
|
4250 |
struct vtnet_rxq *rxq; |
3909 |
int i; |
4251 |
int i; |
3910 |
|
4252 |
|
3911 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) |
4253 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { |
3912 |
vtnet_rxq_enable_intr(&sc->vtnet_rxqs[i]); |
4254 |
rxq = &sc->vtnet_rxqs[i]; |
|
|
4255 |
if (vtnet_rxq_enable_intr(rxq) != 0) |
4256 |
taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); |
4257 |
} |
3913 |
} |
4258 |
} |
3914 |
|
4259 |
|
3915 |
static void |
4260 |
static void |
Lines 3934-3940
Link Here
|
3934 |
{ |
4279 |
{ |
3935 |
int i; |
4280 |
int i; |
3936 |
|
4281 |
|
3937 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) |
4282 |
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) |
3938 |
vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]); |
4283 |
vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]); |
3939 |
} |
4284 |
} |
3940 |
|
4285 |
|
Lines 3943-3949
Link Here
|
3943 |
{ |
4288 |
{ |
3944 |
int i; |
4289 |
int i; |
3945 |
|
4290 |
|
3946 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) |
4291 |
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) |
3947 |
vtnet_txq_disable_intr(&sc->vtnet_txqs[i]); |
4292 |
vtnet_txq_disable_intr(&sc->vtnet_txqs[i]); |
3948 |
} |
4293 |
} |
3949 |
|
4294 |
|