Lines 69-75
 #include <netinet6/ip6_var.h>
 #include <netinet/udp.h>
 #include <netinet/tcp.h>
+#include <netinet/tcp_lro.h>
 #include <netinet/netdump/netdump.h>
 
 #include <machine/bus.h>
Lines 86-95
 #include "opt_inet.h"
 #include "opt_inet6.h"
 
+#if defined(INET) || defined(INET6)
+#include <machine/in_cksum.h>
+#endif
+
 static int vtnet_modevent(module_t, int, void *);
 
 static int vtnet_probe(device_t);
Lines 101-108
 static int vtnet_attach_completed(device_t);
 static int vtnet_config_change(device_t);
 
-static void vtnet_negotiate_features(struct vtnet_softc *);
-static void vtnet_setup_features(struct vtnet_softc *);
+static int vtnet_negotiate_features(struct vtnet_softc *);
+static int vtnet_setup_features(struct vtnet_softc *);
 static int vtnet_init_rxq(struct vtnet_softc *, int);
 static int vtnet_init_txq(struct vtnet_softc *, int);
 static int vtnet_alloc_rxtx_queues(struct vtnet_softc *);
Lines 110-121
 static int vtnet_alloc_rx_filters(struct vtnet_softc *);
 static void vtnet_free_rx_filters(struct vtnet_softc *);
 static int vtnet_alloc_virtqueues(struct vtnet_softc *);
+static int vtnet_alloc_interface(struct vtnet_softc *);
 static int vtnet_setup_interface(struct vtnet_softc *);
-static int vtnet_change_mtu(struct vtnet_softc *, int);
+static int vtnet_ioctl_mtu(struct vtnet_softc *, int);
+static int vtnet_ioctl_ifflags(struct vtnet_softc *);
+static int vtnet_ioctl_multi(struct vtnet_softc *);
+static int vtnet_ioctl_ifcap(struct vtnet_softc *, struct ifreq *);
 static int vtnet_ioctl(struct ifnet *, u_long, caddr_t);
 static uint64_t vtnet_get_counter(struct ifnet *, ift_counter);
 
Lines 123-137
 static void vtnet_rxq_free_mbufs(struct vtnet_rxq *);
 static struct mbuf *
     vtnet_rx_alloc_buf(struct vtnet_softc *, int , struct mbuf **);
-static int vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *,
+static int vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *,
     struct mbuf *, int);
 static int vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
 static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
 static int vtnet_rxq_new_buf(struct vtnet_rxq *);
+static int vtnet_rxq_csum_needs_csum(struct vtnet_rxq *, struct mbuf *,
+    uint16_t, int, struct virtio_net_hdr *);
+static int vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
+    uint16_t, int, struct virtio_net_hdr *);
 static int vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
     struct virtio_net_hdr *);
 static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
Lines 143-149
 static void vtnet_rx_vq_intr(void *);
 static void vtnet_rxq_tq_intr(void *, int);
 
+static int vtnet_txq_intr_threshold(struct vtnet_txq *);
 static int vtnet_txq_below_threshold(struct vtnet_txq *);
 static int vtnet_txq_notify(struct vtnet_txq *);
 static void vtnet_txq_free_mbufs(struct vtnet_txq *);
Lines 193-199
 static int vtnet_init_tx_queues(struct vtnet_softc *);
 static int vtnet_init_rxtx_queues(struct vtnet_softc *);
 static void vtnet_set_active_vq_pairs(struct vtnet_softc *);
+static void vtnet_update_rx_offloads(struct vtnet_softc *);
 static int vtnet_reinit(struct vtnet_softc *);
 static void vtnet_init_locked(struct vtnet_softc *);
 static void vtnet_init(void *);
Lines 202-212
 static void vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
     struct sglist *, int, int);
 static int vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
+static int vtnet_ctrl_guest_offloads(struct vtnet_softc *, uint64_t);
 static int vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
-static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
+static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, uint8_t, int);
 static int vtnet_set_promisc(struct vtnet_softc *, int);
 static int vtnet_set_allmulti(struct vtnet_softc *, int);
-static void vtnet_attach_disable_promisc(struct vtnet_softc *);
 static void vtnet_rx_filter(struct vtnet_softc *);
 static void vtnet_rx_filter_mac(struct vtnet_softc *);
 static int vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
Lines 215-237
 static void vtnet_register_vlan(void *, struct ifnet *, uint16_t);
 static void vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);
 
+static void vtnet_update_speed_duplex(struct vtnet_softc *);
 static int vtnet_is_link_up(struct vtnet_softc *);
 static void vtnet_update_link_status(struct vtnet_softc *);
 static int vtnet_ifmedia_upd(struct ifnet *);
 static void vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
-static void vtnet_get_hwaddr(struct vtnet_softc *);
-static void vtnet_set_hwaddr(struct vtnet_softc *);
+static void vtnet_get_macaddr(struct vtnet_softc *);
+static void vtnet_set_macaddr(struct vtnet_softc *);
+static void vtnet_attached_set_macaddr(struct vtnet_softc *);
 static void vtnet_vlan_tag_remove(struct mbuf *);
 static void vtnet_set_rx_process_limit(struct vtnet_softc *);
-static void vtnet_set_tx_intr_threshold(struct vtnet_softc *);
 
 static void vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
     struct sysctl_oid_list *, struct vtnet_rxq *);
 static void vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
     struct sysctl_oid_list *, struct vtnet_txq *);
 static void vtnet_setup_queue_sysctl(struct vtnet_softc *);
+static void vtnet_load_tunables(struct vtnet_softc *);
 static void vtnet_setup_sysctl(struct vtnet_softc *);
 
 static int vtnet_rxq_enable_intr(struct vtnet_rxq *);
Lines 249-332
 
 NETDUMP_DEFINE(vtnet);
 
-/* Tunables. */
-static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VNET driver parameters");
+#define vtnet_htog16(_sc, _val) virtio_htog16(vtnet_modern(_sc), _val)
+#define vtnet_htog32(_sc, _val) virtio_htog32(vtnet_modern(_sc), _val)
+#define vtnet_htog64(_sc, _val) virtio_htog64(vtnet_modern(_sc), _val)
+#define vtnet_gtoh16(_sc, _val) virtio_gtoh16(vtnet_modern(_sc), _val)
+#define vtnet_gtoh32(_sc, _val) virtio_gtoh32(vtnet_modern(_sc), _val)
+#define vtnet_gtoh64(_sc, _val) virtio_gtoh64(vtnet_modern(_sc), _val)
+
+static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VirtIO Net driver");
+
 static int vtnet_csum_disable = 0;
-TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
 SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
     &vtnet_csum_disable, 0, "Disables receive and send checksum offload");
+
+static int vtnet_fixup_needs_csum = 0;
+SYSCTL_INT(_hw_vtnet, OID_AUTO, fixup_needs_csum, CTLFLAG_RDTUN,
+    &vtnet_fixup_needs_csum, 0,
+    "Calculate valid checksum for NEEDS_CSUM packets");
+
 static int vtnet_tso_disable = 0;
-TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
-SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN, &vtnet_tso_disable,
-    0, "Disables TCP Segmentation Offload");
+SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN,
+    &vtnet_tso_disable, 0, "Disables TSO");
+
 static int vtnet_lro_disable = 0;
-TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
-SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN, &vtnet_lro_disable,
-    0, "Disables TCP Large Receive Offload");
+SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN,
+    &vtnet_lro_disable, 0, "Disables hardware LRO");
+
 static int vtnet_mq_disable = 0;
-TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable);
-SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN, &vtnet_mq_disable,
-    0, "Disables Multi Queue support");
+SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN,
+    &vtnet_mq_disable, 0, "Disables multiqueue support");
+
 static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
-TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs);
 SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
-    &vtnet_mq_max_pairs, 0, "Sets the maximum number of Multi Queue pairs");
-static int vtnet_rx_process_limit = 512;
-TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit);
+    &vtnet_mq_max_pairs, 0, "Maximum number of multiqueue pairs");
+
+static int vtnet_tso_maxlen = IP_MAXPACKET;
+SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
+    &vtnet_tso_maxlen, 0, "TSO burst limit");
+
+static int vtnet_rx_process_limit = 1024;
 SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
     &vtnet_rx_process_limit, 0,
-    "Limits the number RX segments processed in a single pass");
+    "Number of RX segments processed in one pass");
 
+static int vtnet_lro_entry_count = 128;
+SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
+    &vtnet_lro_entry_count, 0, "Software LRO entry count");
+
+/* Enable sorted LRO, and the depth of the mbuf queue. */
+static int vtnet_lro_mbufq_depth = 0;
+SYSCTL_UINT(_hw_vtnet, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN,
+    &vtnet_lro_mbufq_depth, 0, "Depth of software LRO mbuf queue");
+
 static uma_zone_t vtnet_tx_header_zone;
 
 static struct virtio_feature_desc vtnet_feature_desc[] = {
     { VIRTIO_NET_F_CSUM, "TxChecksum" },
     { VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" },
-    { VIRTIO_NET_F_MAC, "MacAddress" },
-    { VIRTIO_NET_F_GSO, "TxAllGSO" },
-    { VIRTIO_NET_F_GUEST_TSO4, "RxTSOv4" },
-    { VIRTIO_NET_F_GUEST_TSO6, "RxTSOv6" },
-    { VIRTIO_NET_F_GUEST_ECN, "RxECN" },
-    { VIRTIO_NET_F_GUEST_UFO, "RxUFO" },
-    { VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" },
-    { VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" },
-    { VIRTIO_NET_F_HOST_ECN, "TxTSOECN" },
-    { VIRTIO_NET_F_HOST_UFO, "TxUFO" },
-    { VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" },
-    { VIRTIO_NET_F_STATUS, "Status" },
-    { VIRTIO_NET_F_CTRL_VQ, "ControlVq" },
-    { VIRTIO_NET_F_CTRL_RX, "RxMode" },
-    { VIRTIO_NET_F_CTRL_VLAN, "VLanFilter" },
-    { VIRTIO_NET_F_CTRL_RX_EXTRA, "RxModeExtra" },
-    { VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" },
-    { VIRTIO_NET_F_MQ, "Multiqueue" },
-    { VIRTIO_NET_F_CTRL_MAC_ADDR, "SetMacAddress" },
+    { VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "CtrlRxOffloads" },
+    { VIRTIO_NET_F_MAC, "MAC" },
+    { VIRTIO_NET_F_GSO, "TxGSO" },
+    { VIRTIO_NET_F_GUEST_TSO4, "RxLROv4" },
+    { VIRTIO_NET_F_GUEST_TSO6, "RxLROv6" },
+    { VIRTIO_NET_F_GUEST_ECN, "RxLROECN" },
+    { VIRTIO_NET_F_GUEST_UFO, "RxUFO" },
+    { VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" },
+    { VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" },
+    { VIRTIO_NET_F_HOST_ECN, "TxTSOECN" },
+    { VIRTIO_NET_F_HOST_UFO, "TxUFO" },
+    { VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" },
+    { VIRTIO_NET_F_STATUS, "Status" },
+    { VIRTIO_NET_F_CTRL_VQ, "CtrlVq" },
+    { VIRTIO_NET_F_CTRL_RX, "CtrlRxMode" },
+    { VIRTIO_NET_F_CTRL_VLAN, "CtrlVLANFilter" },
+    { VIRTIO_NET_F_CTRL_RX_EXTRA, "CtrlRxModeExtra" },
+    { VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" },
+    { VIRTIO_NET_F_MQ, "Multiqueue" },
+    { VIRTIO_NET_F_CTRL_MAC_ADDR, "CtrlMacAddr" },
+    { VIRTIO_NET_F_SPEED_DUPLEX, "SpeedDuplex" },
 
     { 0, NULL }
 };
Lines 349-372
 
 #ifdef DEV_NETMAP
 #include <dev/netmap/if_vtnet_netmap.h>
-#endif /* DEV_NETMAP */
+#endif
 
 static driver_t vtnet_driver = {
-    "vtnet",
-    vtnet_methods,
-    sizeof(struct vtnet_softc)
+    .name = "vtnet",
+    .methods = vtnet_methods,
+    .size = sizeof(struct vtnet_softc)
 };
 static devclass_t vtnet_devclass;
 
 DRIVER_MODULE(vtnet, virtio_mmio, vtnet_driver, vtnet_devclass,
     vtnet_modevent, 0);
-DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
-    vtnet_modevent, 0);
+DRIVER_MODULE(vtnet, vtpcil, vtnet_driver, vtnet_devclass, vtnet_modevent, 0);
+DRIVER_MODULE(vtnet, vtpcim, vtnet_driver, vtnet_devclass, vtnet_modevent, 0);
 MODULE_VERSION(vtnet, 1);
 MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
 #ifdef DEV_NETMAP
 MODULE_DEPEND(vtnet, netmap, 1, 1, 1);
-#endif /* DEV_NETMAP */
+#endif
 
 static int
 vtnet_modevent(module_t mod, int type, void *unused)
Lines 408-414
     if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
         return (ENXIO);
 
-    device_set_desc(dev, "VirtIO Networking Adapter");
+    device_set_desc(dev, "VirtIO Network Adapter");
 
     return (BUS_PROBE_DEFAULT);
 }
Lines 421-446
 
     sc = device_get_softc(dev);
     sc->vtnet_dev = dev;
-
-    /* Register our feature descriptions. */
     virtio_set_feature_desc(dev, vtnet_feature_desc);
 
     VTNET_CORE_LOCK_INIT(sc);
     callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
+    vtnet_load_tunables(sc);
 
+    error = vtnet_alloc_interface(sc);
+    if (error) {
+        device_printf(dev, "cannot allocate interface\n");
+        goto fail;
+    }
+
     vtnet_setup_sysctl(sc);
-    vtnet_setup_features(sc);
 
+    error = vtnet_setup_features(sc);
+    if (error) {
+        device_printf(dev, "cannot setup features\n");
+        goto fail;
+    }
+
     error = vtnet_alloc_rx_filters(sc);
     if (error) {
         device_printf(dev, "cannot allocate Rx filters\n");
Lines 467-480
 
     error = virtio_setup_intr(dev, INTR_TYPE_NET);
     if (error) {
-        device_printf(dev, "cannot setup virtqueue interrupts\n");
-        /* BMV: This will crash if during boot! */
+        device_printf(dev, "cannot setup interrupts\n");
         ether_ifdetach(sc->vtnet_ifp);
         goto fail;
     }
 
 #ifdef DEV_NETMAP
     vtnet_netmap_attach(sc);
-#endif /* DEV_NETMAP */
-
+#endif
     vtnet_start_taskqueues(sc);
 
 fail:
Link Here
|
506 |
|
455 |
|
507 |
#ifdef DEV_NETMAP |
456 |
#ifdef DEV_NETMAP |
508 |
netmap_detach(ifp); |
457 |
netmap_detach(ifp); |
509 |
#endif |
458 |
#endif /* DEV_NETMAP */ |
510 |
|
459 |
|
511 |
vtnet_free_taskqueues(sc); |
460 |
vtnet_free_taskqueues(sc); |
512 |
|
461 |
|
Lines 573-578
 static int
 vtnet_shutdown(device_t dev)
 {
-
     /*
      * Suspend already does all of what we need to
      * do here; we just never expect to be resumed.
Lines 583-596
 static int
 vtnet_attach_completed(device_t dev)
 {
+    struct vtnet_softc *sc;
 
-    vtnet_attach_disable_promisc(device_get_softc(dev));
+    sc = device_get_softc(dev);
 
+    VTNET_CORE_LOCK(sc);
+    vtnet_attached_set_macaddr(sc);
+    VTNET_CORE_UNLOCK(sc);
+
     return (0);
 }
 
Lines 610-676
     return (0);
 }
 
-static void
+static int
 vtnet_negotiate_features(struct vtnet_softc *sc)
 {
     device_t dev;
-    uint64_t mask, features;
+    uint64_t features, negotiated_features;
+    int no_csum;
 
     dev = sc->vtnet_dev;
-    mask = 0;
+    features = virtio_bus_is_modern(dev) ? VTNET_MODERN_FEATURES :
+        VTNET_LEGACY_FEATURES;
 
     /*
      * TSO and LRO are only available when their corresponding checksum
      * offload feature is also negotiated.
      */
-    if (vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable)) {
-        mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
-        mask |= VTNET_TSO_FEATURES | VTNET_LRO_FEATURES;
-    }
-    if (vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
-        mask |= VTNET_TSO_FEATURES;
-    if (vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
-        mask |= VTNET_LRO_FEATURES;
+    no_csum = vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable);
+    if (no_csum)
+        features &= ~(VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM);
+    if (no_csum || vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
+        features &= ~VTNET_TSO_FEATURES;
+    if (no_csum || vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
+        features &= ~VTNET_LRO_FEATURES;
+
 #ifndef VTNET_LEGACY_TX
     if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
-        mask |= VIRTIO_NET_F_MQ;
+        features &= ~VIRTIO_NET_F_MQ;
 #else
-    mask |= VIRTIO_NET_F_MQ;
+    features &= ~VIRTIO_NET_F_MQ;
 #endif
 
-    features = VTNET_FEATURES & ~mask;
-    sc->vtnet_features = virtio_negotiate_features(dev, features);
+    negotiated_features = virtio_negotiate_features(dev, features);
 
+    if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
+        uint16_t mtu;
+
+        mtu = virtio_read_dev_config_2(dev,
+            offsetof(struct virtio_net_config, mtu));
+        if (mtu < VTNET_MIN_MTU /* || mtu > VTNET_MAX_MTU */) {
+            device_printf(dev, "Invalid MTU value: %d. "
+                "MTU feature disabled.\n", mtu);
+            features &= ~VIRTIO_NET_F_MTU;
+            negotiated_features =
+                virtio_negotiate_features(dev, features);
+        }
+    }
+
+    if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
+        uint16_t npairs;
+
+        npairs = virtio_read_dev_config_2(dev,
+            offsetof(struct virtio_net_config, max_virtqueue_pairs));
+        if (npairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
+            npairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
+            device_printf(dev, "Invalid max_virtqueue_pairs value: "
+                "%d. Multiqueue feature disabled.\n", npairs);
+            features &= ~VIRTIO_NET_F_MQ;
+            negotiated_features =
+                virtio_negotiate_features(dev, features);
+        }
+    }
+
     if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
         virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
         /*
Lines 684-718
          */
         if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
             device_printf(dev,
-                "LRO disabled due to both mergeable buffers and "
-                "indirect descriptors not negotiated\n");
-
+                "Host LRO disabled since both mergeable buffers "
+                "and indirect descriptors were not negotiated\n");
             features &= ~VTNET_LRO_FEATURES;
-            sc->vtnet_features =
+            negotiated_features =
                 virtio_negotiate_features(dev, features);
         } else
             sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
     }
+
+    sc->vtnet_features = negotiated_features;
+    sc->vtnet_negotiated_features = negotiated_features;
+
+    return (virtio_finalize_features(dev));
 }
 
-static void
+static int
 vtnet_setup_features(struct vtnet_softc *sc)
 {
     device_t dev;
+    int error;
 
     dev = sc->vtnet_dev;
 
-    vtnet_negotiate_features(sc);
+    error = vtnet_negotiate_features(sc);
+    if (error)
+        return (error);
 
+    if (virtio_with_feature(dev, VIRTIO_F_VERSION_1))
+        sc->vtnet_flags |= VTNET_FLAG_MODERN;
     if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
         sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
     if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
Link Here
|
723 |
sc->vtnet_flags |= VTNET_FLAG_MAC; |
629 |
sc->vtnet_flags |= VTNET_FLAG_MAC; |
724 |
} |
630 |
} |
725 |
|
631 |
|
726 |
if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) { |
|
|
727 |
sc->vtnet_max_mtu = virtio_read_dev_config_2(dev, |
728 |
offsetof(struct virtio_net_config, mtu)); |
729 |
} else |
730 |
sc->vtnet_max_mtu = VTNET_MAX_MTU; |
731 |
|
732 |
if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) { |
632 |
if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) { |
733 |
sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS; |
633 |
sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS; |
734 |
sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
634 |
sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
735 |
} else if (vtnet_modern(sc)) { |
|
|
736 |
/* This is identical to the mergeable header. */ |
737 |
sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_v1); |
738 |
} else |
635 |
} else |
739 |
sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr); |
636 |
sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr); |
740 |
|
637 |
|
741 |
if (vtnet_modern(sc) || sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) |
638 |
if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) |
742 |
sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_INLINE; |
639 |
sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS; |
743 |
else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) |
640 |
else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) |
744 |
sc->vtnet_rx_nsegs = VTNET_RX_SEGS_LRO_NOMRG; |
641 |
sc->vtnet_rx_nsegs = VTNET_MAX_RX_SEGS; |
745 |
else |
642 |
else |
746 |
sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_SEPARATE; |
643 |
sc->vtnet_rx_nsegs = VTNET_MIN_RX_SEGS; |
747 |
|
644 |
|
748 |
/* |
|
|
749 |
* Favor "hardware" LRO if negotiated, but support software LRO as |
750 |
* a fallback; there is usually little benefit (or worse) with both. |
751 |
*/ |
752 |
if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) == 0 && |
753 |
virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6) == 0) |
754 |
sc->vtnet_flags |= VTNET_FLAG_SW_LRO; |
755 |
|
756 |
if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) || |
645 |
if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) || |
757 |
virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) || |
646 |
virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) || |
758 |
virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) |
647 |
virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) |
759 |
sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MAX; |
648 |
sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS; |
760 |
else |
649 |
else |
761 |
sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MIN; |
650 |
sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS; |
762 |
|
651 |
|
763 |
sc->vtnet_req_vq_pairs = 1; |
|
|
764 |
sc->vtnet_max_vq_pairs = 1; |
765 |
|
766 |
if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) { |
652 |
if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) { |
767 |
sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ; |
653 |
sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ; |
768 |
|
654 |
|
Lines 772-808
Link Here
|
772 |
sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER; |
658 |
sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER; |
773 |
if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR)) |
659 |
if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR)) |
774 |
sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC; |
660 |
sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC; |
775 |
|
|
|
776 |
if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) { |
777 |
sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev, |
778 |
offsetof(struct virtio_net_config, |
779 |
max_virtqueue_pairs)); |
780 |
} |
781 |
} |
661 |
} |
782 |
|
662 |
|
783 |
if (sc->vtnet_max_vq_pairs > 1) { |
663 |
if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) && |
784 |
int req; |
664 |
sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { |
|
|
665 |
sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev, |
666 |
offsetof(struct virtio_net_config, max_virtqueue_pairs)); |
667 |
} else |
668 |
sc->vtnet_max_vq_pairs = 1; |
785 |
|
669 |
|
|
|
670 |
if (sc->vtnet_max_vq_pairs > 1) { |
786 |
/* |
671 |
/* |
787 |
* Limit the maximum number of requested queue pairs to the |
672 |
* Limit the maximum number of queue pairs to the lower of |
788 |
* number of CPUs and the configured maximum. |
673 |
* the number of CPUs and the configured maximum. |
|
|
674 |
* The actual number of queues that get used may be less. |
789 |
*/ |
675 |
*/ |
790 |
req = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs); |
676 |
int max; |
791 |
if (req < 0) |
677 |
|
792 |
req = 1; |
678 |
max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs); |
793 |
if (req == 0) |
679 |
if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN) { |
794 |
req = mp_ncpus; |
680 |
if (max > mp_ncpus) |
795 |
if (req > sc->vtnet_max_vq_pairs) |
681 |
max = mp_ncpus; |
796 |
req = sc->vtnet_max_vq_pairs; |
682 |
if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) |
797 |
if (req > mp_ncpus) |
683 |
max = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX; |
798 |
req = mp_ncpus; |
684 |
if (max > 1) { |
799 |
if (req > 1) { |
685 |
sc->vtnet_requested_vq_pairs = max; |
800 |
sc->vtnet_req_vq_pairs = req; |
686 |
sc->vtnet_flags |= VTNET_FLAG_MULTIQ; |
801 |
sc->vtnet_flags |= VTNET_FLAG_MQ; |
687 |
} |
802 |
} |
688 |
} |
803 |
} |
689 |
} |
804 |
|
|
|
805 |
return (0); |
806 |
} |
690 |
} |
807 |
|
691 |
|
808 |
static int |
692 |
static int |
Lines 823-836
Link Here
|
823 |
if (rxq->vtnrx_sg == NULL) |
707 |
if (rxq->vtnrx_sg == NULL) |
824 |
return (ENOMEM); |
708 |
return (ENOMEM); |
825 |
|
709 |
|
826 |
#if defined(INET) || defined(INET6) |
|
|
827 |
if (vtnet_software_lro(sc)) { |
828 |
if (tcp_lro_init_args(&rxq->vtnrx_lro, sc->vtnet_ifp, |
829 |
sc->vtnet_lro_entry_count, sc->vtnet_lro_mbufq_depth) != 0) |
830 |
return (ENOMEM); |
831 |
} |
832 |
#endif |
833 |
|
834 |
TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq); |
710 |
TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq); |
835 |
rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT, |
711 |
rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT, |
836 |
taskqueue_thread_enqueue, &rxq->vtnrx_tq); |
712 |
taskqueue_thread_enqueue, &rxq->vtnrx_tq); |
Lines 896-902
Link Here
|
896 |
return (error); |
772 |
return (error); |
897 |
} |
773 |
} |
898 |
|
774 |
|
899 |
vtnet_set_rx_process_limit(sc); |
|
|
900 |
vtnet_setup_queue_sysctl(sc); |
775 |
vtnet_setup_queue_sysctl(sc); |
901 |
|
776 |
|
902 |
return (0); |
777 |
return (0); |
Lines 909-918
Link Here
|
909 |
rxq->vtnrx_sc = NULL; |
784 |
rxq->vtnrx_sc = NULL; |
910 |
rxq->vtnrx_id = -1; |
785 |
rxq->vtnrx_id = -1; |
911 |
|
786 |
|
912 |
#if defined(INET) || defined(INET6) |
|
|
913 |
tcp_lro_free(&rxq->vtnrx_lro); |
914 |
#endif |
915 |
|
916 |
if (rxq->vtnrx_sg != NULL) { |
787 |
if (rxq->vtnrx_sg != NULL) { |
917 |
sglist_free(rxq->vtnrx_sg); |
788 |
sglist_free(rxq->vtnrx_sg); |
918 |
rxq->vtnrx_sg = NULL; |
789 |
rxq->vtnrx_sg = NULL; |
Lines 1021-1059
Link Here
|
1021 |
if (info == NULL) |
892 |
if (info == NULL) |
1022 |
return (ENOMEM); |
893 |
return (ENOMEM); |
1023 |
|
894 |
|
1024 |
for (i = 0, idx = 0; i < sc->vtnet_req_vq_pairs; i++, idx += 2) { |
895 |
for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx+=2) { |
1025 |
rxq = &sc->vtnet_rxqs[i]; |
896 |
rxq = &sc->vtnet_rxqs[i]; |
1026 |
VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs, |
897 |
VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs, |
1027 |
vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq, |
898 |
vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq, |
1028 |
"%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id); |
899 |
"%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id); |
1029 |
|
900 |
|
1030 |
txq = &sc->vtnet_txqs[i]; |
901 |
txq = &sc->vtnet_txqs[i]; |
1031 |
VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs, |
902 |
VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs, |
1032 |
vtnet_tx_vq_intr, txq, &txq->vtntx_vq, |
903 |
vtnet_tx_vq_intr, txq, &txq->vtntx_vq, |
1033 |
"%s-tx%d", device_get_nameunit(dev), txq->vtntx_id); |
904 |
"%s-%d tx", device_get_nameunit(dev), txq->vtntx_id); |
1034 |
} |
905 |
} |
1035 |
|
906 |
|
1036 |
/* These queues will not be used so allocate the minimum resources. */ |
|
|
1037 |
for (/**/; i < sc->vtnet_max_vq_pairs; i++, idx += 2) { |
1038 |
rxq = &sc->vtnet_rxqs[i]; |
1039 |
VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, rxq, &rxq->vtnrx_vq, |
1040 |
"%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id); |
1041 |
|
1042 |
txq = &sc->vtnet_txqs[i]; |
1043 |
VQ_ALLOC_INFO_INIT(&info[idx+1], 0, NULL, txq, &txq->vtntx_vq, |
1044 |
"%s-tx%d", device_get_nameunit(dev), txq->vtntx_id); |
1045 |
} |
1046 |
|
1047 |
if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { |
907 |
if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { |
1048 |
VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL, |
908 |
VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL, |
1049 |
&sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev)); |
909 |
&sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev)); |
1050 |
} |
910 |
} |
1051 |
|
911 |
|
1052 |
/* |
912 |
/* |
1053 |
* TODO: Enable interrupt binding if this is multiqueue. This will |
913 |
* Enable interrupt binding if this is multiqueue. This only matters |
1054 |
* only matter when per-virtqueue MSIX is available. |
914 |
* when per-vq MSIX is available. |
1055 |
*/ |
915 |
*/ |
1056 |
if (sc->vtnet_flags & VTNET_FLAG_MQ) |
916 |
if (sc->vtnet_flags & VTNET_FLAG_MULTIQ) |
1057 |
flags |= 0; |
917 |
flags |= 0; |
1058 |
|
918 |
|
1059 |
error = virtio_alloc_virtqueues(dev, flags, nvqs, info); |
919 |
error = virtio_alloc_virtqueues(dev, flags, nvqs, info); |
Lines 1063-1097
Link Here
|
1063 |
} |
923 |
} |
1064 |
|
924 |
|
1065 |
static int |
925 |
static int |
1066 |
vtnet_alloc_interface(struct vtnet_softc *sc) |
926 |
vtnet_setup_interface(struct vtnet_softc *sc) |
1067 |
{ |
927 |
{ |
1068 |
device_t dev; |
928 |
device_t dev; |
1069 |
struct ifnet *ifp; |
929 |
struct ifnet *ifp; |
1070 |
|
930 |
|
1071 |
dev = sc->vtnet_dev; |
931 |
dev = sc->vtnet_dev; |
1072 |
|
932 |
|
1073 |
ifp = if_alloc(IFT_ETHER); |
933 |
ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER); |
1074 |
if (ifp == NULL) |
934 |
if (ifp == NULL) { |
1075 |
return (ENOMEM); |
935 |
device_printf(dev, "cannot allocate ifnet structure\n"); |
|
|
936 |
return (ENOSPC); |
937 |
} |
1076 |
|
938 |
|
1077 |
sc->vtnet_ifp = ifp; |
|
|
1078 |
ifp->if_softc = sc; |
1079 |
if_initname(ifp, device_get_name(dev), device_get_unit(dev)); |
939 |
if_initname(ifp, device_get_name(dev), device_get_unit(dev)); |
1080 |
|
940 |
ifp->if_baudrate = IF_Gbps(10); /* Approx. */ |
1081 |
return (0); |
941 |
ifp->if_softc = sc; |
1082 |
} |
|
|
1083 |
|
1084 |
static int |
1085 |
vtnet_setup_interface(struct vtnet_softc *sc) |
1086 |
{ |
1087 |
device_t dev; |
1088 |
struct ifnet *ifp; |
1089 |
|
1090 |
dev = sc->vtnet_dev; |
1091 |
ifp = sc->vtnet_ifp; |
1092 |
|
1093 |
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
942 |
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
1094 |
ifp->if_baudrate = IF_Gbps(10); |
|
|
1095 |
ifp->if_init = vtnet_init; |
943 |
ifp->if_init = vtnet_init; |
1096 |
ifp->if_ioctl = vtnet_ioctl; |
944 |
ifp->if_ioctl = vtnet_ioctl; |
1097 |
ifp->if_get_counter = vtnet_get_counter; |
945 |
ifp->if_get_counter = vtnet_get_counter; |
Lines 1106-1163
Link Here
|
1106 |
IFQ_SET_READY(&ifp->if_snd); |
954 |
IFQ_SET_READY(&ifp->if_snd); |
1107 |
#endif |
955 |
#endif |
1108 |
|
956 |
|
1109 |
vtnet_get_macaddr(sc); |
957 |
ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd, |
|
|
958 |
vtnet_ifmedia_sts); |
959 |
ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL); |
960 |
ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE); |
1110 |
|
961 |
|
|
|
962 |
/* Read (or generate) the MAC address for the adapter. */ |
963 |
vtnet_get_hwaddr(sc); |
964 |
|
965 |
ether_ifattach(ifp, sc->vtnet_hwaddr); |
966 |
|
1111 |
if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) |
967 |
if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) |
1112 |
ifp->if_capabilities |= IFCAP_LINKSTATE; |
968 |
ifp->if_capabilities |= IFCAP_LINKSTATE; |
1113 |
|
969 |
|
1114 |
ifmedia_init(&sc->vtnet_media, 0, vtnet_ifmedia_upd, vtnet_ifmedia_sts); |
970 |
/* Tell the upper layer(s) we support long frames. */ |
1115 |
ifmedia_add(&sc->vtnet_media, IFM_ETHER | IFM_AUTO, 0, NULL); |
971 |
ifp->if_hdrlen = sizeof(struct ether_vlan_header); |
1116 |
ifmedia_set(&sc->vtnet_media, IFM_ETHER | IFM_AUTO); |
972 |
ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; |
1117 |
|
973 |
|
1118 |
if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) { |
974 |
if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) { |
1119 |
int gso; |
|
|
1120 |
|
1121 |
ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6; |
975 |
ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6; |
1122 |
|
976 |
|
1123 |
gso = virtio_with_feature(dev, VIRTIO_NET_F_GSO); |
977 |
if (virtio_with_feature(dev, VIRTIO_NET_F_GSO)) { |
1124 |
if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4)) |
978 |
ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6; |
1125 |
ifp->if_capabilities |= IFCAP_TSO4; |
|
|
1126 |
if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) |
1127 |
ifp->if_capabilities |= IFCAP_TSO6; |
1128 |
if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN)) |
1129 |
sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; |
979 |
sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; |
|
|
980 |
} else { |
981 |
if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4)) |
982 |
ifp->if_capabilities |= IFCAP_TSO4; |
983 |
if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) |
984 |
ifp->if_capabilities |= IFCAP_TSO6; |
985 |
if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN)) |
986 |
sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; |
987 |
} |
1130 |
|
988 |
|
1131 |
if (ifp->if_capabilities & (IFCAP_TSO4 | IFCAP_TSO6)) { |
989 |
if (ifp->if_capabilities & IFCAP_TSO) |
1132 |
int tso_maxlen; |
|
|
1133 |
|
1134 |
ifp->if_capabilities |= IFCAP_VLAN_HWTSO; |
990 |
ifp->if_capabilities |= IFCAP_VLAN_HWTSO; |
1135 |
|
|
|
1136 |
tso_maxlen = vtnet_tunable_int(sc, "tso_maxlen", |
1137 |
vtnet_tso_maxlen); |
1138 |
ifp->if_hw_tsomax = tso_maxlen - |
1139 |
(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); |
1140 |
ifp->if_hw_tsomaxsegcount = sc->vtnet_tx_nsegs - 1; |
1141 |
ifp->if_hw_tsomaxsegsize = PAGE_SIZE; |
1142 |
} |
1143 |
} |
991 |
} |
1144 |
|
992 |
|
1145 |
if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) { |
993 |
if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) { |
1146 |
ifp->if_capabilities |= IFCAP_RXCSUM; |
994 |
ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6; |
1147 |
#ifdef notyet |
|
|
1148 |
/* BMV: Rx checksums not distinguished between IPv4 and IPv6. */ |
1149 |
ifp->if_capabilities |= IFCAP_RXCSUM_IPV6; |
1150 |
#endif |
1151 |
|
995 |
|
1152 |
if (vtnet_tunable_int(sc, "fixup_needs_csum", |
996 |
if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) || |
1153 |
vtnet_fixup_needs_csum) != 0) |
997 |
virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6)) |
1154 |
sc->vtnet_flags |= VTNET_FLAG_FIXUP_NEEDS_CSUM; |
998 |
ifp->if_capabilities |= IFCAP_LRO; |
1155 |
|
|
|
1156 |
/* Support either "hardware" or software LRO. */ |
1157 |
ifp->if_capabilities |= IFCAP_LRO; |
1158 |
} |
999 |
} |
1159 |
|
1000 |
|
1160 |
if (ifp->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6)) { |
1001 |
if (ifp->if_capabilities & IFCAP_HWCSUM) { |
1161 |
/* |
1002 |
/* |
1162 |
* VirtIO does not support VLAN tagging, but we can fake |
1003 |
* VirtIO does not support VLAN tagging, but we can fake |
1163 |
* it by inserting and removing the 802.1Q header during |
1004 |
* it by inserting and removing the 802.1Q header during |
Lines 1168-1181
Link Here
|
1168 |
IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; |
1009 |
IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; |
1169 |
} |
1010 |
} |
1170 |
|
1011 |
|
1171 |
if (sc->vtnet_max_mtu >= ETHERMTU_JUMBO) |
1012 |
ifp->if_capenable = ifp->if_capabilities; |
1172 |
ifp->if_capabilities |= IFCAP_JUMBO_MTU; |
|
|
1173 |
ifp->if_capabilities |= IFCAP_VLAN_MTU; |
1174 |
|
1013 |
|
1175 |
/* |
1014 |
/* |
1176 |
* Capabilities after here are not enabled by default. |
1015 |
* Capabilities after here are not enabled by default. |
1177 |
*/ |
1016 |
*/ |
1178 |
ifp->if_capenable = ifp->if_capabilities; |
|
|
1179 |
|
1017 |
|
1180 |
if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) { |
1018 |
if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) { |
1181 |
ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; |
1019 |
ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; |
Lines 1186-1401
Link Here
|
1186 |
vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST); |
1024 |
vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST); |
1187 |
} |
1025 |
} |
1188 |
|
1026 |
|
1189 |
ether_ifattach(ifp, sc->vtnet_hwaddr); |
1027 |
vtnet_set_rx_process_limit(sc); |
|
|
1028 |
vtnet_set_tx_intr_threshold(sc); |
1190 |
|
1029 |
|
1191 |
/* Tell the upper layer(s) we support long frames. */ |
|
|
1192 |
ifp->if_hdrlen = sizeof(struct ether_vlan_header); |
1193 |
|
1194 |
NETDUMP_SET(ifp, vtnet); |
1030 |
NETDUMP_SET(ifp, vtnet); |
1195 |
|
1031 |
|
1196 |
return (0); |
1032 |
return (0); |
1197 |
} |
1033 |
} |
1198 |
|
1034 |
|
1199 |
static int |
1035 |
static int |
1200 |
vtnet_rx_cluster_size(struct vtnet_softc *sc, int mtu) |
1036 |
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu) |
1201 |
{ |
1037 |
{ |
1202 |
int framesz; |
|
|
1203 |
|
1204 |
if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) |
1205 |
return (MJUMPAGESIZE); |
1206 |
else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) |
1207 |
return (MCLBYTES); |
1208 |
|
1209 |
/* |
1210 |
* Try to scale the receive mbuf cluster size from the MTU. Without |
1211 |
* the GUEST_TSO[46] features, the VirtIO specification says the |
1212 |
* driver must only be able to receive ~1500 byte frames. But if |
1213 |
* jumbo frames can be transmitted then try to receive jumbo. |
1214 |
* |
1215 |
* BMV: Not quite true when F_MTU is negotiated! |
1216 |
*/ |
1217 |
if (vtnet_modern(sc)) { |
1218 |
MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr_v1)); |
1219 |
framesz = sizeof(struct virtio_net_hdr_v1); |
1220 |
} else |
1221 |
framesz = sizeof(struct vtnet_rx_header); |
1222 |
framesz += sizeof(struct ether_vlan_header) + mtu; |
1223 |
|
1224 |
if (framesz <= MCLBYTES) |
1225 |
return (MCLBYTES); |
1226 |
else if (framesz <= MJUMPAGESIZE) |
1227 |
return (MJUMPAGESIZE); |
1228 |
else if (framesz <= MJUM9BYTES) |
1229 |
return (MJUM9BYTES); |
1230 |
|
1231 |
/* Sane default; avoid 16KB clusters. */ |
1232 |
return (MCLBYTES); |
1233 |
} |
1234 |
|
1235 |
static int |
1236 |
vtnet_ioctl_mtu(struct vtnet_softc *sc, int mtu) |
1237 |
{ |
1238 |
struct ifnet *ifp; |
1038 |
struct ifnet *ifp; |
1239 |
int clustersz; |
1039 |
int frame_size, clsize; |
1240 |
|
1040 |
|
1241 |
ifp = sc->vtnet_ifp; |
1041 |
ifp = sc->vtnet_ifp; |
1242 |
VTNET_CORE_LOCK_ASSERT(sc); |
|
|
1243 |
|
1042 |
|
1244 |
if (ifp->if_mtu == mtu) |
1043 |
if (new_mtu < ETHERMIN || new_mtu > VTNET_MAX_MTU) |
1245 |
return (0); |
|
|
1246 |
else if (mtu < ETHERMIN || mtu > sc->vtnet_max_mtu) |
1247 |
return (EINVAL); |
1044 |
return (EINVAL); |
1248 |
|
1045 |
|
1249 |
ifp->if_mtu = mtu; |
1046 |
frame_size = sc->vtnet_hdr_size + sizeof(struct ether_vlan_header) + |
1250 |
clustersz = vtnet_rx_cluster_size(sc, mtu); |
1047 |
new_mtu; |
1251 |
|
1048 |
|
1252 |
if (clustersz != sc->vtnet_rx_clustersz && |
1049 |
/* |
1253 |
ifp->if_drv_flags & IFF_DRV_RUNNING) { |
1050 |
* Based on the new MTU (and hence frame size) determine which |
1254 |
ifp->if_drv_flags &= ~IFF_DRV_RUNNING; |
1051 |
* cluster size is most appropriate for the receive queues. |
1255 |
vtnet_init_locked(sc); |
1052 |
*/ |
1256 |
} |
1053 |
if (frame_size <= MCLBYTES) { |
|
|
1054 |
clsize = MCLBYTES; |
1055 |
} else if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { |
1056 |
/* Avoid going past 9K jumbos. */ |
1057 |
if (frame_size > MJUM9BYTES) |
1058 |
return (EINVAL); |
1059 |
clsize = MJUM9BYTES; |
1060 |
} else |
1061 |
clsize = MJUMPAGESIZE; |
1257 |
|
1062 |
|
1258 |
return (0); |
1063 |
ifp->if_mtu = new_mtu; |
1259 |
} |
1064 |
sc->vtnet_rx_new_clsize = clsize; |
1260 |
|
1065 |
|
1261 |
static int |
1066 |
if (ifp->if_drv_flags & IFF_DRV_RUNNING) { |
1262 |
vtnet_ioctl_ifflags(struct vtnet_softc *sc) |
1067 |
ifp->if_drv_flags &= ~IFF_DRV_RUNNING; |
1263 |
{ |
|
|
1264 |
struct ifnet *ifp; |
1265 |
int drv_running; |
1266 |
|
1267 |
ifp = sc->vtnet_ifp; |
1268 |
drv_running = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; |
1269 |
|
1270 |
VTNET_CORE_LOCK_ASSERT(sc); |
1271 |
|
1272 |
if ((ifp->if_flags & IFF_UP) == 0) { |
1273 |
if (drv_running) |
1274 |
vtnet_stop(sc); |
1275 |
goto out; |
1276 |
} |
1277 |
|
1278 |
if (!drv_running) { |
1279 |
vtnet_init_locked(sc); |
1068 |
vtnet_init_locked(sc); |
1280 |
goto out; |
|
|
1281 |
} |
1069 |
} |
1282 |
|
1070 |
|
1283 |
if ((ifp->if_flags ^ sc->vtnet_if_flags) & |
|
|
1284 |
(IFF_PROMISC | IFF_ALLMULTI)) { |
1285 |
if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) |
1286 |
return (ENOTSUP); |
1287 |
vtnet_rx_filter(sc); |
1288 |
} |
1289 |
|
1290 |
out: |
1291 |
sc->vtnet_if_flags = ifp->if_flags; |
1292 |
return (0); |
1071 |
return (0); |
1293 |
} |
1072 |
} |
1294 |
|
1073 |
|
1295 |
static int |
1074 |
static int |
1296 |
vtnet_ioctl_multi(struct vtnet_softc *sc) |
|
|
1297 |
{ |
1298 |
struct ifnet *ifp; |
1299 |
|
1300 |
ifp = sc->vtnet_ifp; |
1301 |
|
1302 |
VTNET_CORE_LOCK_ASSERT(sc); |
1303 |
|
1304 |
if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX && |
1305 |
ifp->if_drv_flags & IFF_DRV_RUNNING) |
1306 |
vtnet_rx_filter_mac(sc); |
1307 |
|
1308 |
return (0); |
1309 |
} |
1310 |
|
1311 |
static int |
1312 |
vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr) |
1313 |
{ |
1314 |
struct ifnet *ifp; |
1315 |
int mask, reinit, update; |
1316 |
|
1317 |
ifp = sc->vtnet_ifp; |
1318 |
mask = (ifr->ifr_reqcap & ifp->if_capabilities) ^ ifp->if_capenable; |
1319 |
reinit = update = 0; |
1320 |
|
1321 |
VTNET_CORE_LOCK_ASSERT(sc); |
1322 |
|
1323 |
if (mask & IFCAP_TXCSUM) |
1324 |
ifp->if_capenable ^= IFCAP_TXCSUM; |
1325 |
if (mask & IFCAP_TXCSUM_IPV6) |
1326 |
ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; |
1327 |
if (mask & IFCAP_TSO4) |
1328 |
ifp->if_capenable ^= IFCAP_TSO4; |
1329 |
if (mask & IFCAP_TSO6) |
1330 |
ifp->if_capenable ^= IFCAP_TSO6; |
1331 |
|
1332 |
if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) { |
1333 |
/* |
1334 |
* These Rx features require the negotiated features to |
1335 |
* be updated. Avoid a full reinit if possible. |
1336 |
*/ |
1337 |
if (sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
1338 |
update = 1; |
1339 |
else |
1340 |
reinit = 1; |
1341 |
|
1342 |
/* BMV: Avoid needless renegotiation for just software LRO. */ |
1343 |
if ((mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) == |
1344 |
IFCAP_LRO && vtnet_software_lro(sc)) |
1345 |
reinit = update = 0; |
1346 |
|
1347 |
if (mask & IFCAP_RXCSUM) |
1348 |
ifp->if_capenable ^= IFCAP_RXCSUM; |
1349 |
if (mask & IFCAP_RXCSUM_IPV6) |
1350 |
ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; |
1351 |
if (mask & IFCAP_LRO) |
1352 |
ifp->if_capenable ^= IFCAP_LRO; |
1353 |
|
1354 |
/* |
1355 |
* VirtIO does not distinguish between IPv4 and IPv6 checksums |
1356 |
* so treat them as a pair. Guest TSO (LRO) requires receive |
1357 |
* checksums. |
1358 |
*/ |
1359 |
if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) { |
1360 |
ifp->if_capenable |= IFCAP_RXCSUM; |
1361 |
#ifdef notyet |
1362 |
ifp->if_capenable |= IFCAP_RXCSUM_IPV6; |
1363 |
#endif |
1364 |
} else |
1365 |
ifp->if_capenable &= |
1366 |
~(IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO); |
1367 |
} |
1368 |
|
1369 |
if (mask & IFCAP_VLAN_HWFILTER) { |
1370 |
/* These Rx features require renegotiation. */ |
1371 |
reinit = 1; |
1372 |
|
1373 |
if (mask & IFCAP_VLAN_HWFILTER) |
1374 |
ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; |
1375 |
} |
1376 |
|
1377 |
if (mask & IFCAP_VLAN_HWTSO) |
1378 |
ifp->if_capenable ^= IFCAP_VLAN_HWTSO; |
1379 |
if (mask & IFCAP_VLAN_HWTAGGING) |
1380 |
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; |
1381 |
|
1382 |
if (ifp->if_drv_flags & IFF_DRV_RUNNING) { |
1383 |
if (reinit) { |
1384 |
ifp->if_drv_flags &= ~IFF_DRV_RUNNING; |
1385 |
vtnet_init_locked(sc); |
1386 |
} else if (update) |
1387 |
vtnet_update_rx_offloads(sc); |
1388 |
} |
1389 |
|
1390 |
return (0); |
1391 |
} |
1392 |
|
1393 |
static int |
1394 |
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) |
1075 |
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) |
1395 |
{ |
1076 |
{ |
1396 |
struct vtnet_softc *sc; |
1077 |
struct vtnet_softc *sc; |
1397 |
struct ifreq *ifr; |
1078 |
struct ifreq *ifr; |
1398 |
int error; |
1079 |
int reinit, mask, error; |
1399 |
|
1080 |
|
1400 |
sc = ifp->if_softc; |
1081 |
sc = ifp->if_softc; |
1401 |
ifr = (struct ifreq *) data; |
1082 |
ifr = (struct ifreq *) data; |
Lines 1403-1423
Link Here
|
1403 |
|
1084 |
|
1404 |
switch (cmd) { |
1085 |
switch (cmd) { |
1405 |
case SIOCSIFMTU: |
1086 |
case SIOCSIFMTU: |
1406 |
VTNET_CORE_LOCK(sc); |
1087 |
if (ifp->if_mtu != ifr->ifr_mtu) { |
1407 |
error = vtnet_ioctl_mtu(sc, ifr->ifr_mtu); |
1088 |
VTNET_CORE_LOCK(sc); |
1408 |
VTNET_CORE_UNLOCK(sc); |
1089 |
error = vtnet_change_mtu(sc, ifr->ifr_mtu); |
|
|
1090 |
VTNET_CORE_UNLOCK(sc); |
1091 |
} |
1409 |
break; |
1092 |
break; |
1410 |
|
1093 |
|
1411 |
case SIOCSIFFLAGS: |
1094 |
case SIOCSIFFLAGS: |
1412 |
VTNET_CORE_LOCK(sc); |
1095 |
VTNET_CORE_LOCK(sc); |
1413 |
error = vtnet_ioctl_ifflags(sc); |
1096 |
if ((ifp->if_flags & IFF_UP) == 0) { |
|
|
1097 |
if (ifp->if_drv_flags & IFF_DRV_RUNNING) |
1098 |
vtnet_stop(sc); |
1099 |
} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { |
1100 |
if ((ifp->if_flags ^ sc->vtnet_if_flags) & |
1101 |
(IFF_PROMISC | IFF_ALLMULTI)) { |
1102 |
if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) |
1103 |
vtnet_rx_filter(sc); |
1104 |
else { |
1105 |
ifp->if_flags |= IFF_PROMISC; |
1106 |
if ((ifp->if_flags ^ sc->vtnet_if_flags) |
1107 |
& IFF_ALLMULTI) |
1108 |
error = ENOTSUP; |
1109 |
} |
1110 |
} |
1111 |
} else |
1112 |
vtnet_init_locked(sc); |
1113 |
|
1114 |
if (error == 0) |
1115 |
sc->vtnet_if_flags = ifp->if_flags; |
1414 |
VTNET_CORE_UNLOCK(sc); |
1116 |
VTNET_CORE_UNLOCK(sc); |
1415 |
break; |
1117 |
break; |
1416 |
|
1118 |
|
1417 |
case SIOCADDMULTI: |
1119 |
case SIOCADDMULTI: |
1418 |
case SIOCDELMULTI: |
1120 |
case SIOCDELMULTI: |
|
|
1121 |
if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) |
1122 |
break; |
1419 |
VTNET_CORE_LOCK(sc); |
1123 |
VTNET_CORE_LOCK(sc); |
1420 |
error = vtnet_ioctl_multi(sc); |
1124 |
if (ifp->if_drv_flags & IFF_DRV_RUNNING) |
|
|
1125 |
vtnet_rx_filter_mac(sc); |
1421 |
VTNET_CORE_UNLOCK(sc); |
1126 |
VTNET_CORE_UNLOCK(sc); |
1422 |
break; |
1127 |
break; |
1423 |
|
1128 |
|
Lines 1428-1436
Link Here
|
1428 |
|
1133 |
|
1429 |
case SIOCSIFCAP: |
1134 |
case SIOCSIFCAP: |
1430 |
VTNET_CORE_LOCK(sc); |
1135 |
VTNET_CORE_LOCK(sc); |
1431 |
error = vtnet_ioctl_ifcap(sc, ifr); |
1136 |
mask = ifr->ifr_reqcap ^ ifp->if_capenable; |
|
|
1137 |
|
1138 |
if (mask & IFCAP_TXCSUM) |
1139 |
ifp->if_capenable ^= IFCAP_TXCSUM; |
1140 |
if (mask & IFCAP_TXCSUM_IPV6) |
1141 |
ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; |
1142 |
if (mask & IFCAP_TSO4) |
1143 |
ifp->if_capenable ^= IFCAP_TSO4; |
1144 |
if (mask & IFCAP_TSO6) |
1145 |
ifp->if_capenable ^= IFCAP_TSO6; |
1146 |
|
1147 |
if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO | |
1148 |
IFCAP_VLAN_HWFILTER)) { |
1149 |
/* These Rx features require us to renegotiate. */ |
1150 |
reinit = 1; |
1151 |
|
1152 |
if (mask & IFCAP_RXCSUM) |
1153 |
ifp->if_capenable ^= IFCAP_RXCSUM; |
1154 |
if (mask & IFCAP_RXCSUM_IPV6) |
1155 |
ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; |
1156 |
if (mask & IFCAP_LRO) |
1157 |
ifp->if_capenable ^= IFCAP_LRO; |
1158 |
if (mask & IFCAP_VLAN_HWFILTER) |
1159 |
ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; |
1160 |
} else |
1161 |
reinit = 0; |
1162 |
|
1163 |
if (mask & IFCAP_VLAN_HWTSO) |
1164 |
ifp->if_capenable ^= IFCAP_VLAN_HWTSO; |
1165 |
if (mask & IFCAP_VLAN_HWTAGGING) |
1166 |
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; |
1167 |
|
1168 |
if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { |
1169 |
ifp->if_drv_flags &= ~IFF_DRV_RUNNING; |
1170 |
vtnet_init_locked(sc); |
1171 |
} |
1172 |
|
1432 |
VTNET_CORE_UNLOCK(sc); |
1173 |
VTNET_CORE_UNLOCK(sc); |
1433 |
VLAN_CAPABILITIES(ifp); |
1174 |
VLAN_CAPABILITIES(ifp); |
|
|
1175 |
|
1434 |
break; |
1176 |
break; |
1435 |
|
1177 |
|
1436 |
default: |
1178 |
default: |
Lines 1449-1454
Link Here
|
1449 |
struct virtqueue *vq; |
1191 |
struct virtqueue *vq; |
1450 |
int nbufs, error; |
1192 |
int nbufs, error; |
1451 |
|
1193 |
|
|
|
1194 |
#ifdef DEV_NETMAP |
1195 |
error = vtnet_netmap_rxq_populate(rxq); |
1196 |
if (error >= 0) |
1197 |
return (error); |
1198 |
#endif /* DEV_NETMAP */ |
1199 |
|
1452 |
vq = rxq->vtnrx_vq; |
1200 |
vq = rxq->vtnrx_vq; |
1453 |
error = ENOSPC; |
1201 |
error = ENOSPC; |
1454 |
|
1202 |
|
Lines 1478-1489
|
1478 |
struct virtqueue *vq; |
1226 |
struct virtqueue *vq; |
1479 |
struct mbuf *m; |
1227 |
struct mbuf *m; |
1480 |
int last; |
1228 |
int last; |
|
|
1229 |
#ifdef DEV_NETMAP |
1230 |
int netmap_bufs = vtnet_netmap_queue_on(rxq->vtnrx_sc, NR_RX, |
1231 |
rxq->vtnrx_id); |
1232 |
#else /* !DEV_NETMAP */ |
1233 |
int netmap_bufs = 0; |
1234 |
#endif /* !DEV_NETMAP */ |
1481 |
|
1235 |
|
1482 |
vq = rxq->vtnrx_vq; |
1236 |
vq = rxq->vtnrx_vq; |
1483 |
last = 0; |
1237 |
last = 0; |
1484 |
|
1238 |
|
1485 |
while ((m = virtqueue_drain(vq, &last)) != NULL) |
1239 |
while ((m = virtqueue_drain(vq, &last)) != NULL) { |
1486 |
m_freem(m); |
1240 |
if (!netmap_bufs) |
|
|
1241 |
m_freem(m); |
1242 |
} |
1487 |
|
1243 |
|
1488 |
KASSERT(virtqueue_empty(vq), |
1244 |
KASSERT(virtqueue_empty(vq), |
1489 |
("%s: mbufs remaining in rx queue %p", __func__, rxq)); |
1245 |
("%s: mbufs remaining in rx queue %p", __func__, rxq)); |
Lines 1493-1541
|
1493 |
vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp) |
1249 |
vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp) |
1494 |
{ |
1250 |
{ |
1495 |
struct mbuf *m_head, *m_tail, *m; |
1251 |
struct mbuf *m_head, *m_tail, *m; |
1496 |
int i, size; |
1252 |
int i, clsize; |
1497 |
|
1253 |
|
1498 |
m_head = NULL; |
1254 |
clsize = sc->vtnet_rx_clsize; |
1499 |
size = sc->vtnet_rx_clustersz; |
|
|
1500 |
|
1255 |
|
1501 |
KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, |
1256 |
KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, |
1502 |
("%s: mbuf %d chain requested without LRO_NOMRG", __func__, nbufs)); |
1257 |
("%s: chained mbuf %d request without LRO_NOMRG", __func__, nbufs)); |
1503 |
|
1258 |
|
1504 |
for (i = 0; i < nbufs; i++) { |
1259 |
m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize); |
1505 |
m = m_getjcl(M_NOWAIT, MT_DATA, i == 0 ? M_PKTHDR : 0, size); |
1260 |
if (m_head == NULL) |
1506 |
if (m == NULL) { |
1261 |
goto fail; |
1507 |
sc->vtnet_stats.mbuf_alloc_failed++; |
|
|
1508 |
m_freem(m_head); |
1509 |
return (NULL); |
1510 |
} |
1511 |
|
1262 |
|
1512 |
m->m_len = size; |
1263 |
m_head->m_len = clsize; |
1513 |
if (m_head != NULL) { |
1264 |
m_tail = m_head; |
1514 |
m_tail->m_next = m; |
1265 |
|
1515 |
m_tail = m; |
1266 |
/* Allocate the rest of the chain. */ |
1516 |
} else |
1267 |
for (i = 1; i < nbufs; i++) { |
1517 |
m_head = m_tail = m; |
1268 |
m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize); |
|
|
1269 |
if (m == NULL) |
1270 |
goto fail; |
1271 |
|
1272 |
m->m_len = clsize; |
1273 |
m_tail->m_next = m; |
1274 |
m_tail = m; |
1518 |
} |
1275 |
} |
1519 |
|
1276 |
|
1520 |
if (m_tailp != NULL) |
1277 |
if (m_tailp != NULL) |
1521 |
*m_tailp = m_tail; |
1278 |
*m_tailp = m_tail; |
1522 |
|
1279 |
|
1523 |
return (m_head); |
1280 |
return (m_head); |
|
|
1281 |
|
1282 |
fail: |
1283 |
sc->vtnet_stats.mbuf_alloc_failed++; |
1284 |
m_freem(m_head); |
1285 |
|
1286 |
return (NULL); |
1524 |
} |
1287 |
} |
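When LRO without mergeable buffers is negotiated, one received frame may span a whole chain of equally sized clusters, so the number of mbufs per chain is just a ceiling division of the largest expected frame by the cluster size. A minimal standalone sketch of that arithmetic, using illustrative constants and names rather than the driver's definitions:

#include <stdio.h>

#define EX_MAX_RX_SIZE  65550   /* assumed largest LRO frame, like VTNET_MAX_RX_SIZE */
#define EX_HDR_PAD      4       /* assumed pad kept in front of the frame data */

/* Ceiling division: how many clusters of clustersz bytes hold one frame. */
static int
ex_rx_chain_nbufs(int clustersz)
{
        return ((EX_HDR_PAD + EX_MAX_RX_SIZE + clustersz - 1) / clustersz);
}

int
main(void)
{
        printf("2048-byte clusters need %d mbufs per chain\n", ex_rx_chain_nbufs(2048));
        printf("4096-byte clusters need %d mbufs per chain\n", ex_rx_chain_nbufs(4096));
        return (0);
}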
1525 |
|
1288 |
|
1526 |
/* |
1289 |
/* |
1527 |
* Slow path for when LRO without mergeable buffers is negotiated. |
1290 |
* Slow path for when LRO without mergeable buffers is negotiated. |
1528 |
*/ |
1291 |
*/ |
1529 |
static int |
1292 |
static int |
1530 |
vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *rxq, struct mbuf *m0, |
1293 |
vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *rxq, struct mbuf *m0, |
1531 |
int len0) |
1294 |
int len0) |
1532 |
{ |
1295 |
{ |
1533 |
struct vtnet_softc *sc; |
1296 |
struct vtnet_softc *sc; |
1534 |
struct mbuf *m, *m_prev, *m_new, *m_tail; |
1297 |
struct mbuf *m, *m_prev; |
1535 |
int len, clustersz, nreplace, error; |
1298 |
struct mbuf *m_new, *m_tail; |
|
|
1299 |
int len, clsize, nreplace, error; |
1536 |
|
1300 |
|
1537 |
sc = rxq->vtnrx_sc; |
1301 |
sc = rxq->vtnrx_sc; |
1538 |
clustersz = sc->vtnet_rx_clustersz; |
1302 |
clsize = sc->vtnet_rx_clsize; |
1539 |
|
1303 |
|
1540 |
m_prev = NULL; |
1304 |
m_prev = NULL; |
1541 |
m_tail = NULL; |
1305 |
m_tail = NULL; |
Lines 1545-1567
|
1545 |
len = len0; |
1309 |
len = len0; |
1546 |
|
1310 |
|
1547 |
/* |
1311 |
/* |
1548 |
* Since these mbuf chains are so large, avoid allocating a complete |
1312 |
* Since these mbuf chains are so large, we avoid allocating an |
1549 |
* replacement when the received frame did not consume the entire |
1313 |
* entire replacement chain if possible. When the received frame |
1550 |
* chain. Unused mbufs are moved to the tail of the replacement mbuf. |
1314 |
* did not consume the entire chain, the unused mbufs are moved |
|
|
1315 |
* to the replacement chain. |
1551 |
*/ |
1316 |
*/ |
1552 |
while (len > 0) { |
1317 |
while (len > 0) { |
|
|
1318 |
/* |
1319 |
* Something is seriously wrong if we received a frame |
1320 |
* larger than the chain. Drop it. |
1321 |
*/ |
1553 |
if (m == NULL) { |
1322 |
if (m == NULL) { |
1554 |
sc->vtnet_stats.rx_frame_too_large++; |
1323 |
sc->vtnet_stats.rx_frame_too_large++; |
1555 |
return (EMSGSIZE); |
1324 |
return (EMSGSIZE); |
1556 |
} |
1325 |
} |
1557 |
|
1326 |
|
1558 |
/* |
1327 |
/* We always allocate the same cluster size. */ |
1559 |
* Every mbuf should have the expected cluster size since that |
1328 |
KASSERT(m->m_len == clsize, |
1560 |
* is also used to allocate the replacements. |
1329 |
("%s: mbuf size %d is not the cluster size %d", |
1561 |
*/ |
1330 |
__func__, m->m_len, clsize)); |
1562 |
KASSERT(m->m_len == clustersz, |
|
|
1563 |
("%s: mbuf size %d not expected cluster size %d", __func__, |
1564 |
m->m_len, clustersz)); |
1565 |
|
1331 |
|
1566 |
m->m_len = MIN(m->m_len, len); |
1332 |
m->m_len = MIN(m->m_len, len); |
1567 |
len -= m->m_len; |
1333 |
len -= m->m_len; |
Lines 1571-1589
|
1571 |
nreplace++; |
1337 |
nreplace++; |
1572 |
} |
1338 |
} |
1573 |
|
1339 |
|
1574 |
KASSERT(nreplace > 0 && nreplace <= sc->vtnet_rx_nmbufs, |
1340 |
KASSERT(nreplace <= sc->vtnet_rx_nmbufs, |
1575 |
("%s: invalid replacement mbuf count %d max %d", __func__, |
1341 |
("%s: too many replacement mbufs %d max %d", __func__, nreplace, |
1576 |
nreplace, sc->vtnet_rx_nmbufs)); |
1342 |
sc->vtnet_rx_nmbufs)); |
1577 |
|
1343 |
|
1578 |
m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail); |
1344 |
m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail); |
1579 |
if (m_new == NULL) { |
1345 |
if (m_new == NULL) { |
1580 |
m_prev->m_len = clustersz; |
1346 |
m_prev->m_len = clsize; |
1581 |
return (ENOBUFS); |
1347 |
return (ENOBUFS); |
1582 |
} |
1348 |
} |
1583 |
|
1349 |
|
1584 |
/* |
1350 |
/* |
1585 |
* Move any unused mbufs from the received mbuf chain onto the |
1351 |
* Move any unused mbufs from the received chain onto the end |
1586 |
* end of the replacement chain. |
1352 |
* of the new chain. |
1587 |
*/ |
1353 |
*/ |
1588 |
if (m_prev->m_next != NULL) { |
1354 |
if (m_prev->m_next != NULL) { |
1589 |
m_tail->m_next = m_prev->m_next; |
1355 |
m_tail->m_next = m_prev->m_next; |
Lines 1593-1610
|
1593 |
error = vtnet_rxq_enqueue_buf(rxq, m_new); |
1359 |
error = vtnet_rxq_enqueue_buf(rxq, m_new); |
1594 |
if (error) { |
1360 |
if (error) { |
1595 |
/* |
1361 |
/* |
1596 |
* The replacement is supposed to be a copy of the one |
1362 |
* BAD! We could not enqueue the replacement mbuf chain. We |
1597 |
* dequeued so this is a very unexpected error. |
1363 |
* must restore the m0 chain to the original state if it was |
|
|
1364 |
* modified so we can subsequently discard it. |
1598 |
* |
1365 |
* |
1599 |
* Restore the m0 chain to the original state if it was |
1366 |
* NOTE: The replacement is supposed to be an identical copy |
1600 |
* modified so we can then discard it. |
1367 |
* to the one just dequeued so this is an unexpected error. |
1601 |
*/ |
1368 |
*/ |
|
|
1369 |
sc->vtnet_stats.rx_enq_replacement_failed++; |
1370 |
|
1602 |
if (m_tail->m_next != NULL) { |
1371 |
if (m_tail->m_next != NULL) { |
1603 |
m_prev->m_next = m_tail->m_next; |
1372 |
m_prev->m_next = m_tail->m_next; |
1604 |
m_tail->m_next = NULL; |
1373 |
m_tail->m_next = NULL; |
1605 |
} |
1374 |
} |
1606 |
m_prev->m_len = clustersz; |
1375 |
|
1607 |
sc->vtnet_stats.rx_enq_replacement_failed++; |
1376 |
m_prev->m_len = clsize; |
1608 |
m_freem(m_new); |
1377 |
m_freem(m_new); |
1609 |
} |
1378 |
} |
1610 |
|
1379 |
|
Lines 1620-1642
|
1620 |
|
1389 |
|
1621 |
sc = rxq->vtnrx_sc; |
1390 |
sc = rxq->vtnrx_sc; |
1622 |
|
1391 |
|
1623 |
if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) |
1392 |
KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL, |
1624 |
return (vtnet_rxq_replace_lro_nomrg_buf(rxq, m, len)); |
1393 |
("%s: chained mbuf without LRO_NOMRG", __func__)); |
1625 |
|
1394 |
|
1626 |
MPASS(m->m_next == NULL); |
1395 |
if (m->m_next == NULL) { |
1627 |
if (m->m_len < len) |
1396 |
/* Fast-path for the common case of just one mbuf. */ |
1628 |
return (EMSGSIZE); |
1397 |
if (m->m_len < len) |
|
|
1398 |
return (EINVAL); |
1629 |
|
1399 |
|
1630 |
m_new = vtnet_rx_alloc_buf(sc, 1, NULL); |
1400 |
m_new = vtnet_rx_alloc_buf(sc, 1, NULL); |
1631 |
if (m_new == NULL) |
1401 |
if (m_new == NULL) |
1632 |
return (ENOBUFS); |
1402 |
return (ENOBUFS); |
1633 |
|
1403 |
|
1634 |
error = vtnet_rxq_enqueue_buf(rxq, m_new); |
1404 |
error = vtnet_rxq_enqueue_buf(rxq, m_new); |
1635 |
if (error) { |
1405 |
if (error) { |
1636 |
sc->vtnet_stats.rx_enq_replacement_failed++; |
1406 |
/* |
1637 |
m_freem(m_new); |
1407 |
* The new mbuf is supposed to be an identical |
|
|
1408 |
* copy of the one just dequeued so this is an |
1409 |
* unexpected error. |
1410 |
*/ |
1411 |
m_freem(m_new); |
1412 |
sc->vtnet_stats.rx_enq_replacement_failed++; |
1413 |
} else |
1414 |
m->m_len = len; |
1638 |
} else |
1415 |
} else |
1639 |
m->m_len = len; |
1416 |
error = vtnet_rxq_replace_lro_nomgr_buf(rxq, m, len); |
1640 |
|
1417 |
|
1641 |
return (error); |
1418 |
return (error); |
1642 |
} |
1419 |
} |
Lines 1646-1688
|
1646 |
{ |
1423 |
{ |
1647 |
struct vtnet_softc *sc; |
1424 |
struct vtnet_softc *sc; |
1648 |
struct sglist *sg; |
1425 |
struct sglist *sg; |
1649 |
int header_inlined, error; |
1426 |
struct vtnet_rx_header *rxhdr; |
|
|
1427 |
uint8_t *mdata; |
1428 |
int offset, error; |
1650 |
|
1429 |
|
1651 |
sc = rxq->vtnrx_sc; |
1430 |
sc = rxq->vtnrx_sc; |
1652 |
sg = rxq->vtnrx_sg; |
1431 |
sg = rxq->vtnrx_sg; |
|
|
1432 |
mdata = mtod(m, uint8_t *); |
1653 |
|
1433 |
|
1654 |
KASSERT(m->m_next == NULL || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, |
|
|
1655 |
("%s: mbuf chain without LRO_NOMRG", __func__)); |
1656 |
VTNET_RXQ_LOCK_ASSERT(rxq); |
1434 |
VTNET_RXQ_LOCK_ASSERT(rxq); |
|
|
1435 |
KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL, |
1436 |
("%s: chained mbuf without LRO_NOMRG", __func__)); |
1437 |
KASSERT(m->m_len == sc->vtnet_rx_clsize, |
1438 |
("%s: unexpected cluster size %d/%d", __func__, m->m_len, |
1439 |
sc->vtnet_rx_clsize)); |
1657 |
|
1440 |
|
1658 |
sglist_reset(sg); |
1441 |
sglist_reset(sg); |
1659 |
header_inlined = vtnet_modern(sc) || |
1442 |
if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { |
1660 |
(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) != 0; /* TODO: ANY_LAYOUT */ |
|
|
1661 |
|
1662 |
if (header_inlined) |
1663 |
error = sglist_append_mbuf(sg, m); |
1664 |
else { |
1665 |
struct vtnet_rx_header *rxhdr = |
1666 |
mtod(m, struct vtnet_rx_header *); |
1667 |
MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr)); |
1443 |
MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr)); |
|
|
1444 |
rxhdr = (struct vtnet_rx_header *) mdata; |
1445 |
sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size); |
1446 |
offset = sizeof(struct vtnet_rx_header); |
1447 |
} else |
1448 |
offset = 0; |
1668 |
|
1449 |
|
1669 |
/* Append the header and remaining mbuf data. */ |
1450 |
sglist_append(sg, mdata + offset, m->m_len - offset); |
1670 |
error = sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size); |
1451 |
if (m->m_next != NULL) { |
1671 |
if (error) |
1452 |
error = sglist_append_mbuf(sg, m->m_next); |
1672 |
return (error); |
1453 |
MPASS(error == 0); |
1673 |
error = sglist_append(sg, &rxhdr[1], |
|
|
1674 |
m->m_len - sizeof(struct vtnet_rx_header)); |
1675 |
if (error) |
1676 |
return (error); |
1677 |
|
1678 |
if (m->m_next != NULL) |
1679 |
error = sglist_append_mbuf(sg, m->m_next); |
1680 |
} |
1454 |
} |
1681 |
|
1455 |
|
1682 |
if (error) |
1456 |
error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg); |
1683 |
return (error); |
|
|
1684 |
|
1457 |
|
1685 |
return (virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg)); |
1458 |
return (error); |
1686 |
} |
1459 |
} |
1687 |
|
1460 |
|
1688 |
static int |
1461 |
static int |
Lines 1705-1777
|
1705 |
return (error); |
1478 |
return (error); |
1706 |
} |
1479 |
} |
1707 |
|
1480 |
|
|
|
1481 |
/* |
1482 |
* Use the checksum offset in the VirtIO header to set the |
1483 |
* correct CSUM_* flags. |
1484 |
*/ |
1708 |
static int |
1485 |
static int |
1709 |
vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t etype, |
1486 |
vtnet_rxq_csum_by_offset(struct vtnet_rxq *rxq, struct mbuf *m, |
1710 |
int hoff, struct virtio_net_hdr *hdr) |
1487 |
uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr) |
1711 |
{ |
1488 |
{ |
1712 |
struct vtnet_softc *sc; |
1489 |
struct vtnet_softc *sc; |
1713 |
int error; |
1490 |
#if defined(INET) || defined(INET6) |
|
|
1491 |
int offset = hdr->csum_start + hdr->csum_offset; |
1492 |
#endif |
1714 |
|
1493 |
|
1715 |
sc = rxq->vtnrx_sc; |
1494 |
sc = rxq->vtnrx_sc; |
1716 |
|
1495 |
|
1717 |
/* |
1496 |
/* Only do a basic sanity check on the offset. */ |
1718 |
* NEEDS_CSUM corresponds to Linux's CHECKSUM_PARTIAL, but FreeBSD does |
1497 |
switch (eth_type) { |
1719 |
* not have an analogous CSUM flag. The checksum has been validated, |
1498 |
#if defined(INET) |
1720 |
* but is incomplete (TCP/UDP pseudo header). |
1499 |
case ETHERTYPE_IP: |
1721 |
* |
1500 |
if (__predict_false(offset < ip_start + sizeof(struct ip))) |
1722 |
* The packet is likely from another VM on the same host that itself |
1501 |
return (1); |
1723 |
* performed checksum offloading so Tx/Rx is basically a memcpy and |
1502 |
break; |
1724 |
* the checksum has little value. |
1503 |
#endif |
1725 |
* |
1504 |
#if defined(INET6) |
1726 |
* Default to receiving the packet as-is for performance reasons, but |
1505 |
case ETHERTYPE_IPV6: |
1727 |
* this can cause issues if the packet is to be forwarded because it |
1506 |
if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr))) |
1728 |
* does not contain a valid checksum. This patch may be helpful: |
1507 |
return (1); |
1729 |
* https://reviews.freebsd.org/D6611. In the meantime, have the driver |
1508 |
break; |
1730 |
* compute the checksum if requested. |
1509 |
#endif |
1731 |
* |
1510 |
default: |
1732 |
* BMV: Need to add a CSUM_PARTIAL flag? |
1511 |
sc->vtnet_stats.rx_csum_bad_ethtype++; |
1733 |
*/ |
1512 |
return (1); |
1734 |
if ((sc->vtnet_flags & VTNET_FLAG_FIXUP_NEEDS_CSUM) == 0) { |
|
|
1735 |
error = vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr); |
1736 |
return (error); |
1737 |
} |
1513 |
} |
1738 |
|
1514 |
|
1739 |
/* |
1515 |
/* |
1740 |
* Compute the checksum in the driver so the packet will contain a |
1516 |
* Use the offset to determine the appropriate CSUM_* flags. This is |
1741 |
* valid checksum. The checksum is at csum_offset from csum_start. |
1517 |
* a bit dirty, but we can get by with it since the checksum offsets |
|
|
1518 |
* happen to be different. We assume the host does not do IPv4 |
1519 |
* header checksum offloading. |
1742 |
*/ |
1520 |
*/ |
1743 |
switch (etype) { |
1521 |
switch (hdr->csum_offset) { |
1744 |
#if defined(INET) || defined(INET6) |
1522 |
case offsetof(struct udphdr, uh_sum): |
1745 |
case ETHERTYPE_IP: |
1523 |
case offsetof(struct tcphdr, th_sum): |
1746 |
case ETHERTYPE_IPV6: { |
|
|
1747 |
int csum_off, csum_end; |
1748 |
uint16_t csum; |
1749 |
|
1750 |
csum_off = hdr->csum_start + hdr->csum_offset; |
1751 |
csum_end = csum_off + sizeof(uint16_t); |
1752 |
|
1753 |
/* Assume checksum will be in the first mbuf. */ |
1754 |
if (m->m_len < csum_end || m->m_pkthdr.len < csum_end) |
1755 |
return (1); |
1756 |
|
1757 |
/* |
1758 |
* Like in_delayed_cksum()/in6_delayed_cksum(), compute the |
1759 |
* checksum and write it at the specified offset. We could |
1760 |
* try to verify the packet: csum_start should probably |
1761 |
* correspond to the start of the TCP/UDP header. |
1762 |
* |
1763 |
* BMV: Need to properly handle UDP with zero checksum. Is |
1764 |
* the IPv4 header checksum implicitly validated? |
1765 |
*/ |
1766 |
csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start); |
1767 |
*(uint16_t *)(mtodo(m, csum_off)) = csum; |
1768 |
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; |
1524 |
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; |
1769 |
m->m_pkthdr.csum_data = 0xFFFF; |
1525 |
m->m_pkthdr.csum_data = 0xFFFF; |
1770 |
break; |
1526 |
break; |
1771 |
} |
|
|
1772 |
#endif |
1773 |
default: |
1527 |
default: |
1774 |
sc->vtnet_stats.rx_csum_bad_ethtype++; |
1528 |
sc->vtnet_stats.rx_csum_bad_offset++; |
1775 |
return (1); |
1529 |
return (1); |
1776 |
} |
1530 |
} |
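The NEEDS_CSUM fixup above ends by writing a 16-bit Internet checksum at csum_start + csum_offset. A standalone sketch of that ones'-complement sum over a flat buffer follows; it is only an illustration - the driver works on an mbuf chain via in_cksum_skip() and relies on the stack's usual TCP/UDP pseudo-header handling, which is not modelled here.

#include <stdint.h>
#include <stddef.h>

/*
 * RFC 1071 ones'-complement checksum of buf[start .. len-1], stored back
 * at start + csum_offset, mirroring the "compute and write at the given
 * offset" shape of the fixup above.
 */
static void
ex_store_cksum(uint8_t *buf, size_t len, size_t start, size_t csum_offset)
{
        uint32_t sum = 0;
        size_t i;

        for (i = start; i + 1 < len; i += 2)
                sum += (uint32_t)((buf[i] << 8) | buf[i + 1]);
        if (i < len)
                sum += (uint32_t)(buf[i] << 8);          /* odd trailing byte */

        while (sum > 0xffff)                             /* fold the carries */
                sum = (sum & 0xffff) + (sum >> 16);
        sum = ~sum & 0xffff;

        buf[start + csum_offset] = (uint8_t)(sum >> 8);  /* network byte order */
        buf[start + csum_offset + 1] = (uint8_t)sum;
}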
1777 |
|
1531 |
|
Lines 1779-1833
|
1779 |
} |
1533 |
} |
1780 |
|
1534 |
|
1781 |
static int |
1535 |
static int |
1782 |
vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m, |
1536 |
vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m, |
1783 |
uint16_t etype, int hoff, struct virtio_net_hdr *hdr) |
1537 |
uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr) |
1784 |
{ |
1538 |
{ |
1785 |
struct vtnet_softc *sc; |
1539 |
struct vtnet_softc *sc; |
1786 |
int protocol; |
1540 |
int offset, proto; |
1787 |
|
1541 |
|
1788 |
sc = rxq->vtnrx_sc; |
1542 |
sc = rxq->vtnrx_sc; |
1789 |
|
1543 |
|
1790 |
switch (etype) { |
1544 |
switch (eth_type) { |
1791 |
#if defined(INET) |
1545 |
#if defined(INET) |
1792 |
case ETHERTYPE_IP: |
1546 |
case ETHERTYPE_IP: { |
1793 |
if (__predict_false(m->m_len < hoff + sizeof(struct ip))) |
1547 |
struct ip *ip; |
1794 |
protocol = IPPROTO_DONE; |
1548 |
if (__predict_false(m->m_len < ip_start + sizeof(struct ip))) |
1795 |
else { |
1549 |
return (1); |
1796 |
struct ip *ip = (struct ip *)(m->m_data + hoff); |
1550 |
ip = (struct ip *)(m->m_data + ip_start); |
1797 |
protocol = ip->ip_p; |
1551 |
proto = ip->ip_p; |
1798 |
} |
1552 |
offset = ip_start + (ip->ip_hl << 2); |
1799 |
break; |
1553 |
break; |
|
|
1554 |
} |
1800 |
#endif |
1555 |
#endif |
1801 |
#if defined(INET6) |
1556 |
#if defined(INET6) |
1802 |
case ETHERTYPE_IPV6: |
1557 |
case ETHERTYPE_IPV6: |
1803 |
if (__predict_false(m->m_len < hoff + sizeof(struct ip6_hdr)) |
1558 |
if (__predict_false(m->m_len < ip_start + |
1804 |
|| ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0) |
1559 |
sizeof(struct ip6_hdr))) |
1805 |
protocol = IPPROTO_DONE; |
1560 |
return (1); |
|
|
1561 |
offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto); |
1562 |
if (__predict_false(offset < 0)) |
1563 |
return (1); |
1806 |
break; |
1564 |
break; |
1807 |
#endif |
1565 |
#endif |
1808 |
default: |
1566 |
default: |
1809 |
protocol = IPPROTO_DONE; |
1567 |
sc->vtnet_stats.rx_csum_bad_ethtype++; |
1810 |
break; |
1568 |
return (1); |
1811 |
} |
1569 |
} |
1812 |
|
1570 |
|
1813 |
switch (protocol) { |
1571 |
switch (proto) { |
1814 |
case IPPROTO_TCP: |
1572 |
case IPPROTO_TCP: |
|
|
1573 |
if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) |
1574 |
return (1); |
1575 |
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; |
1576 |
m->m_pkthdr.csum_data = 0xFFFF; |
1577 |
break; |
1815 |
case IPPROTO_UDP: |
1578 |
case IPPROTO_UDP: |
|
|
1579 |
if (__predict_false(m->m_len < offset + sizeof(struct udphdr))) |
1580 |
return (1); |
1816 |
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; |
1581 |
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; |
1817 |
m->m_pkthdr.csum_data = 0xFFFF; |
1582 |
m->m_pkthdr.csum_data = 0xFFFF; |
1818 |
break; |
1583 |
break; |
1819 |
default: |
1584 |
default: |
1820 |
/* |
1585 |
/* |
1821 |
* FreeBSD does not support checksum offloading of this |
1586 |
* For the remaining protocols, FreeBSD does not support |
1822 |
* protocol. Let the stack re-verify the checksum later |
1587 |
* checksum offloading, so the checksum will be recomputed. |
1823 |
* if the protocol is supported. |
|
|
1824 |
*/ |
1588 |
*/ |
1825 |
#if 0 |
1589 |
#if 0 |
1826 |
if_printf(sc->vtnet_ifp, |
1590 |
if_printf(sc->vtnet_ifp, "cksum offload of unsupported " |
1827 |
"%s: checksum offload of unsupported protocol " |
1591 |
"protocol eth_type=%#x proto=%d csum_start=%d " |
1828 |
"etype=%#x protocol=%d csum_start=%d csum_offset=%d\n", |
1592 |
"csum_offset=%d\n", __func__, eth_type, proto, |
1829 |
__func__, etype, protocol, hdr->csum_start, |
1593 |
hdr->csum_start, hdr->csum_offset); |
1830 |
hdr->csum_offset); |
|
|
1831 |
#endif |
1594 |
#endif |
1832 |
break; |
1595 |
break; |
1833 |
} |
1596 |
} |
Lines 1835-1863
|
1835 |
return (0); |
1598 |
return (0); |
1836 |
} |
1599 |
} |
1837 |
|
1600 |
|
|
|
1601 |
/* |
1602 |
* Set the appropriate CSUM_* flags. Unfortunately, the information |
1603 |
* provided is not directly useful to us. The VirtIO header gives the |
1604 |
* offset of the checksum, which is all Linux needs, but this is not |
1605 |
* how FreeBSD does things. We are forced to peek inside the packet |
1606 |
* a bit. |
1607 |
* |
1608 |
* It would be nice if VirtIO gave us the L4 protocol or if FreeBSD |
1609 |
* could accept the offsets and let the stack figure it out. |
1610 |
*/ |
1838 |
static int |
1611 |
static int |
1839 |
vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m, |
1612 |
vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m, |
1840 |
struct virtio_net_hdr *hdr) |
1613 |
struct virtio_net_hdr *hdr) |
1841 |
{ |
1614 |
{ |
1842 |
const struct ether_header *eh; |
1615 |
struct ether_header *eh; |
1843 |
int hoff; |
1616 |
struct ether_vlan_header *evh; |
1844 |
uint16_t etype; |
1617 |
uint16_t eth_type; |
|
|
1618 |
int offset, error; |
1845 |
|
1619 |
|
1846 |
eh = mtod(m, const struct ether_header *); |
1620 |
eh = mtod(m, struct ether_header *); |
1847 |
etype = ntohs(eh->ether_type); |
1621 |
eth_type = ntohs(eh->ether_type); |
1848 |
if (etype == ETHERTYPE_VLAN) { |
1622 |
if (eth_type == ETHERTYPE_VLAN) { |
1849 |
/* TODO BMV: Handle QinQ. */ |
1623 |
/* BMV: We should handle nested VLAN tags too. */ |
1850 |
const struct ether_vlan_header *evh = |
1624 |
evh = mtod(m, struct ether_vlan_header *); |
1851 |
mtod(m, const struct ether_vlan_header *); |
1625 |
eth_type = ntohs(evh->evl_proto); |
1852 |
etype = ntohs(evh->evl_proto); |
1626 |
offset = sizeof(struct ether_vlan_header); |
1853 |
hoff = sizeof(struct ether_vlan_header); |
|
|
1854 |
} else |
1627 |
} else |
1855 |
hoff = sizeof(struct ether_header); |
1628 |
offset = sizeof(struct ether_header); |
1856 |
|
1629 |
|
1857 |
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) |
1630 |
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) |
1858 |
return (vtnet_rxq_csum_needs_csum(rxq, m, etype, hoff, hdr)); |
1631 |
error = vtnet_rxq_csum_by_offset(rxq, m, eth_type, offset, hdr); |
1859 |
else /* VIRTIO_NET_HDR_F_DATA_VALID */ |
1632 |
else |
1860 |
return (vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr)); |
1633 |
error = vtnet_rxq_csum_by_parse(rxq, m, eth_type, offset, hdr); |
|
|
1634 |
|
1635 |
return (error); |
1861 |
} |
1636 |
} |
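The parse above only needs the Ethertype and the offset where the L3 header starts, optionally skipping one 802.1Q tag. A standalone sketch of that lookup on a flat frame buffer, using plain byte access instead of the kernel's ether_header structures; names and the VLAN constant are written out here only for the example:

#include <stdint.h>
#include <stddef.h>

#define EX_ETHERTYPE_VLAN 0x8100

static uint16_t
ex_get_be16(const uint8_t *p)
{
        return (uint16_t)((p[0] << 8) | p[1]);           /* network byte order */
}

/* Returns 0 on success and fills in the Ethertype and L3 header offset. */
static int
ex_parse_ether(const uint8_t *frame, size_t len, uint16_t *etype, size_t *hoff)
{
        if (len < 14)
                return (-1);
        *etype = ex_get_be16(frame + 12);
        *hoff = 14;

        if (*etype == EX_ETHERTYPE_VLAN) {
                if (len < 18)
                        return (-1);
                *etype = ex_get_be16(frame + 16);        /* inner Ethertype */
                *hoff = 18;                              /* L3 header follows the tag */
        }
        return (0);
}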
1862 |
|
1637 |
|
1863 |
static void |
1638 |
static void |
Lines 1892-1907
|
1892 |
{ |
1667 |
{ |
1893 |
struct vtnet_softc *sc; |
1668 |
struct vtnet_softc *sc; |
1894 |
struct virtqueue *vq; |
1669 |
struct virtqueue *vq; |
1895 |
struct mbuf *m_tail; |
1670 |
struct mbuf *m, *m_tail; |
|
|
1671 |
int len; |
1896 |
|
1672 |
|
1897 |
sc = rxq->vtnrx_sc; |
1673 |
sc = rxq->vtnrx_sc; |
1898 |
vq = rxq->vtnrx_vq; |
1674 |
vq = rxq->vtnrx_vq; |
1899 |
m_tail = m_head; |
1675 |
m_tail = m_head; |
1900 |
|
1676 |
|
1901 |
while (--nbufs > 0) { |
1677 |
while (--nbufs > 0) { |
1902 |
struct mbuf *m; |
|
|
1903 |
int len; |
1904 |
|
1905 |
m = virtqueue_dequeue(vq, &len); |
1678 |
m = virtqueue_dequeue(vq, &len); |
1906 |
if (m == NULL) { |
1679 |
if (m == NULL) { |
1907 |
rxq->vtnrx_stats.vrxs_ierrors++; |
1680 |
rxq->vtnrx_stats.vrxs_ierrors++; |
Lines 1936-1970
|
1936 |
return (1); |
1709 |
return (1); |
1937 |
} |
1710 |
} |
1938 |
|
1711 |
|
1939 |
#if defined(INET) || defined(INET6) |
|
|
1940 |
static int |
1941 |
vtnet_lro_rx(struct vtnet_rxq *rxq, struct mbuf *m) |
1942 |
{ |
1943 |
struct lro_ctrl *lro; |
1944 |
|
1945 |
lro = &rxq->vtnrx_lro; |
1946 |
|
1947 |
if (lro->lro_mbuf_max != 0) { |
1948 |
tcp_lro_queue_mbuf(lro, m); |
1949 |
return (0); |
1950 |
} |
1951 |
|
1952 |
return (tcp_lro_rx(lro, m, 0)); |
1953 |
} |
1954 |
#endif |
1955 |
|
1956 |
static void |
1712 |
static void |
1957 |
vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m, |
1713 |
vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m, |
1958 |
struct virtio_net_hdr *hdr) |
1714 |
struct virtio_net_hdr *hdr) |
1959 |
{ |
1715 |
{ |
1960 |
struct vtnet_softc *sc; |
1716 |
struct vtnet_softc *sc; |
1961 |
struct ifnet *ifp; |
1717 |
struct ifnet *ifp; |
|
|
1718 |
struct ether_header *eh; |
1962 |
|
1719 |
|
1963 |
sc = rxq->vtnrx_sc; |
1720 |
sc = rxq->vtnrx_sc; |
1964 |
ifp = sc->vtnet_ifp; |
1721 |
ifp = sc->vtnet_ifp; |
1965 |
|
1722 |
|
1966 |
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { |
1723 |
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { |
1967 |
struct ether_header *eh = mtod(m, struct ether_header *); |
1724 |
eh = mtod(m, struct ether_header *); |
1968 |
if (eh->ether_type == htons(ETHERTYPE_VLAN)) { |
1725 |
if (eh->ether_type == htons(ETHERTYPE_VLAN)) { |
1969 |
vtnet_vlan_tag_remove(m); |
1726 |
vtnet_vlan_tag_remove(m); |
1970 |
/* |
1727 |
/* |
Lines 1979-2014
|
1979 |
m->m_pkthdr.flowid = rxq->vtnrx_id; |
1736 |
m->m_pkthdr.flowid = rxq->vtnrx_id; |
1980 |
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); |
1737 |
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); |
1981 |
|
1738 |
|
1982 |
if (hdr->flags & |
1739 |
/* |
1983 |
(VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) { |
1740 |
* BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum |
|
|
1741 |
* distinction that Linux does. Need to reevaluate if performing |
1742 |
* offloading for the NEEDS_CSUM case is really appropriate. |
1743 |
*/ |
1744 |
if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM | |
1745 |
VIRTIO_NET_HDR_F_DATA_VALID)) { |
1984 |
if (vtnet_rxq_csum(rxq, m, hdr) == 0) |
1746 |
if (vtnet_rxq_csum(rxq, m, hdr) == 0) |
1985 |
rxq->vtnrx_stats.vrxs_csum++; |
1747 |
rxq->vtnrx_stats.vrxs_csum++; |
1986 |
else |
1748 |
else |
1987 |
rxq->vtnrx_stats.vrxs_csum_failed++; |
1749 |
rxq->vtnrx_stats.vrxs_csum_failed++; |
1988 |
} |
1750 |
} |
1989 |
|
1751 |
|
1990 |
if (hdr->gso_size != 0) { |
|
|
1991 |
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { |
1992 |
case VIRTIO_NET_HDR_GSO_TCPV4: |
1993 |
case VIRTIO_NET_HDR_GSO_TCPV6: |
1994 |
m->m_pkthdr.lro_nsegs = |
1995 |
howmany(m->m_pkthdr.len, hdr->gso_size); |
1996 |
rxq->vtnrx_stats.vrxs_host_lro++; |
1997 |
break; |
1998 |
} |
1999 |
} |
2000 |
|
2001 |
rxq->vtnrx_stats.vrxs_ipackets++; |
1752 |
rxq->vtnrx_stats.vrxs_ipackets++; |
2002 |
rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len; |
1753 |
rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len; |
2003 |
|
1754 |
|
2004 |
#if defined(INET) || defined(INET6) |
1755 |
VTNET_RXQ_UNLOCK(rxq); |
2005 |
if (vtnet_software_lro(sc) && ifp->if_capenable & IFCAP_LRO) { |
|
|
2006 |
if (vtnet_lro_rx(rxq, m) == 0) |
2007 |
return; |
2008 |
} |
2009 |
#endif |
2010 |
|
2011 |
(*ifp->if_input)(ifp, m); |
1756 |
(*ifp->if_input)(ifp, m); |
|
|
1757 |
VTNET_RXQ_LOCK(rxq); |
2012 |
} |
1758 |
} |
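For packets the host has already coalesced, the number of original wire segments is estimated as a ceiling division of the packet length by gso_size, which is what the lro_nsegs assignment above does. A tiny standalone sketch, with illustrative names:

#include <stdio.h>

static unsigned
ex_lro_nsegs(unsigned pkt_len, unsigned gso_size)
{
        if (gso_size == 0)
                return (1);                      /* not a coalesced packet */
        return ((pkt_len + gso_size - 1) / gso_size);
}

int
main(void)
{
        /* A 45000-byte aggregate of 1448-byte segments counts as 32 segments. */
        printf("%u segments\n", ex_lro_nsegs(45000, 1448));
        return (0);
}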
2013 |
|
1759 |
|
2014 |
static int |
1760 |
static int |
Lines 2018-2042
|
2018 |
struct vtnet_softc *sc; |
1764 |
struct vtnet_softc *sc; |
2019 |
struct ifnet *ifp; |
1765 |
struct ifnet *ifp; |
2020 |
struct virtqueue *vq; |
1766 |
struct virtqueue *vq; |
2021 |
int deq, count; |
1767 |
struct mbuf *m; |
|
|
1768 |
struct virtio_net_hdr_mrg_rxbuf *mhdr; |
1769 |
int len, deq, nbufs, adjsz, count; |
2022 |
|
1770 |
|
2023 |
sc = rxq->vtnrx_sc; |
1771 |
sc = rxq->vtnrx_sc; |
2024 |
vq = rxq->vtnrx_vq; |
1772 |
vq = rxq->vtnrx_vq; |
2025 |
ifp = sc->vtnet_ifp; |
1773 |
ifp = sc->vtnet_ifp; |
|
|
1774 |
hdr = &lhdr; |
2026 |
deq = 0; |
1775 |
deq = 0; |
2027 |
count = sc->vtnet_rx_process_limit; |
1776 |
count = sc->vtnet_rx_process_limit; |
2028 |
|
1777 |
|
2029 |
VTNET_RXQ_LOCK_ASSERT(rxq); |
1778 |
VTNET_RXQ_LOCK_ASSERT(rxq); |
2030 |
|
1779 |
|
2031 |
#ifdef DEV_NETMAP |
|
|
2032 |
if (netmap_rx_irq(ifp, 0, &deq)) |
2033 |
return (0); |
2034 |
#endif |
2035 |
|
2036 |
while (count-- > 0) { |
1780 |
while (count-- > 0) { |
2037 |
struct mbuf *m; |
|
|
2038 |
int len, nbufs, adjsz; |
2039 |
|
2040 |
m = virtqueue_dequeue(vq, &len); |
1781 |
m = virtqueue_dequeue(vq, &len); |
2041 |
if (m == NULL) |
1782 |
if (m == NULL) |
2042 |
break; |
1783 |
break; |
Lines 2048-2069
|
2048 |
continue; |
1789 |
continue; |
2049 |
} |
1790 |
} |
2050 |
|
1791 |
|
2051 |
if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) { |
1792 |
if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { |
2052 |
struct virtio_net_hdr_mrg_rxbuf *mhdr = |
|
|
2053 |
mtod(m, struct virtio_net_hdr_mrg_rxbuf *); |
2054 |
nbufs = vtnet_htog16(sc, mhdr->num_buffers); |
2055 |
adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
2056 |
} else if (vtnet_modern(sc)) { |
2057 |
nbufs = 1; /* num_buffers is always 1 */ |
2058 |
adjsz = sizeof(struct virtio_net_hdr_v1); |
2059 |
} else { |
2060 |
nbufs = 1; |
1793 |
nbufs = 1; |
2061 |
adjsz = sizeof(struct vtnet_rx_header); |
1794 |
adjsz = sizeof(struct vtnet_rx_header); |
2062 |
/* |
1795 |
/* |
2063 |
* Account for our gap between the header and start of |
1796 |
* Account for our pad inserted between the header |
2064 |
* data to keep the segments separated. |
1797 |
* and the actual start of the frame. |
2065 |
*/ |
1798 |
*/ |
2066 |
len += VTNET_RX_HEADER_PAD; |
1799 |
len += VTNET_RX_HEADER_PAD; |
|
|
1800 |
} else { |
1801 |
mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *); |
1802 |
nbufs = mhdr->num_buffers; |
1803 |
adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
2067 |
} |
1804 |
} |
2068 |
|
1805 |
|
2069 |
if (vtnet_rxq_replace_buf(rxq, m, len) != 0) { |
1806 |
if (vtnet_rxq_replace_buf(rxq, m, len) != 0) { |
Lines 2085-2113
|
2085 |
} |
1822 |
} |
2086 |
|
1823 |
|
2087 |
/* |
1824 |
/* |
2088 |
* Save an endian swapped version of the header prior to it |
1825 |
* Save copy of header before we strip it. For both mergeable |
2089 |
* being stripped. The header is always at the start of the |
1826 |
* and non-mergeable, the header is at the beginning of the |
2090 |
* mbuf data. num_buffers was already saved (and not needed) |
1827 |
* mbuf data. We no longer need num_buffers, so always use a |
2091 |
* so use the standard header. |
1828 |
* regular header. |
|
|
1829 |
* |
1830 |
* BMV: Is this memcpy() expensive? We know the mbuf data is |
1831 |
* still valid even after the m_adj(). |
2092 |
*/ |
1832 |
*/ |
2093 |
hdr = mtod(m, struct virtio_net_hdr *); |
1833 |
memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr)); |
2094 |
lhdr.flags = hdr->flags; |
|
|
2095 |
lhdr.gso_type = hdr->gso_type; |
2096 |
lhdr.hdr_len = vtnet_htog16(sc, hdr->hdr_len); |
2097 |
lhdr.gso_size = vtnet_htog16(sc, hdr->gso_size); |
2098 |
lhdr.csum_start = vtnet_htog16(sc, hdr->csum_start); |
2099 |
lhdr.csum_offset = vtnet_htog16(sc, hdr->csum_offset); |
2100 |
m_adj(m, adjsz); |
1834 |
m_adj(m, adjsz); |
2101 |
|
1835 |
|
2102 |
vtnet_rxq_input(rxq, m, &lhdr); |
1836 |
vtnet_rxq_input(rxq, m, hdr); |
|
|
1837 |
|
1838 |
/* Must recheck after dropping the Rx lock. */ |
1839 |
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) |
1840 |
break; |
2103 |
} |
1841 |
} |
2104 |
|
1842 |
|
2105 |
if (deq > 0) { |
1843 |
if (deq > 0) |
2106 |
#if defined(INET) || defined(INET6) |
|
|
2107 |
tcp_lro_flush_all(&rxq->vtnrx_lro); |
2108 |
#endif |
2109 |
virtqueue_notify(vq); |
1844 |
virtqueue_notify(vq); |
2110 |
} |
|
|
2111 |
|
1845 |
|
2112 |
return (count > 0 ? 0 : EAGAIN); |
1846 |
return (count > 0 ? 0 : EAGAIN); |
2113 |
} |
1847 |
} |
Lines 2136-2141
|
2136 |
return; |
1870 |
return; |
2137 |
} |
1871 |
} |
2138 |
|
1872 |
|
|
|
1873 |
#ifdef DEV_NETMAP |
1874 |
if (netmap_rx_irq(ifp, rxq->vtnrx_id, &more) != NM_IRQ_PASS) |
1875 |
return; |
1876 |
#endif /* DEV_NETMAP */ |
1877 |
|
2139 |
VTNET_RXQ_LOCK(rxq); |
1878 |
VTNET_RXQ_LOCK(rxq); |
2140 |
|
1879 |
|
2141 |
again: |
1880 |
again: |
Lines 2155-2162
|
2155 |
if (tries++ < VTNET_INTR_DISABLE_RETRIES) |
1894 |
if (tries++ < VTNET_INTR_DISABLE_RETRIES) |
2156 |
goto again; |
1895 |
goto again; |
2157 |
|
1896 |
|
2158 |
rxq->vtnrx_stats.vrxs_rescheduled++; |
|
|
2159 |
VTNET_RXQ_UNLOCK(rxq); |
1897 |
VTNET_RXQ_UNLOCK(rxq); |
|
|
1898 |
rxq->vtnrx_stats.vrxs_rescheduled++; |
2160 |
taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); |
1899 |
taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); |
2161 |
} else |
1900 |
} else |
2162 |
VTNET_RXQ_UNLOCK(rxq); |
1901 |
VTNET_RXQ_UNLOCK(rxq); |
Lines 2186-2234
|
2186 |
if (!more) |
1925 |
if (!more) |
2187 |
vtnet_rxq_disable_intr(rxq); |
1926 |
vtnet_rxq_disable_intr(rxq); |
2188 |
rxq->vtnrx_stats.vrxs_rescheduled++; |
1927 |
rxq->vtnrx_stats.vrxs_rescheduled++; |
2189 |
VTNET_RXQ_UNLOCK(rxq); |
|
|
2190 |
taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); |
1928 |
taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); |
2191 |
} else |
1929 |
} |
2192 |
VTNET_RXQ_UNLOCK(rxq); |
|
|
2193 |
} |
2194 |
|
1930 |
|
2195 |
static int |
1931 |
VTNET_RXQ_UNLOCK(rxq); |
2196 |
vtnet_txq_intr_threshold(struct vtnet_txq *txq) |
|
|
2197 |
{ |
2198 |
struct vtnet_softc *sc; |
2199 |
int threshold; |
2200 |
|
2201 |
sc = txq->vtntx_sc; |
2202 |
|
2203 |
/* |
2204 |
* The Tx interrupt is disabled until the queue free count falls |
2205 |
* below our threshold. Completed frames are drained from the Tx |
2206 |
* virtqueue before transmitting new frames and in the watchdog |
2207 |
* callout, so the frequency of Tx interrupts is greatly reduced, |
2208 |
* at the cost of not freeing mbufs as quickly as they otherwise |
2209 |
* would be. |
2210 |
*/ |
2211 |
threshold = virtqueue_size(txq->vtntx_vq) / 4; |
2212 |
|
2213 |
/* |
2214 |
* Without indirect descriptors, leave enough room for the most |
2215 |
* segments we handle. |
2216 |
*/ |
2217 |
if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 && |
2218 |
threshold < sc->vtnet_tx_nsegs) |
2219 |
threshold = sc->vtnet_tx_nsegs; |
2220 |
|
2221 |
return (threshold); |
2222 |
} |
1932 |
} |
2223 |
|
1933 |
|
2224 |
static int |
1934 |
static int |
2225 |
vtnet_txq_below_threshold(struct vtnet_txq *txq) |
1935 |
vtnet_txq_below_threshold(struct vtnet_txq *txq) |
2226 |
{ |
1936 |
{ |
|
|
1937 |
struct vtnet_softc *sc; |
2227 |
struct virtqueue *vq; |
1938 |
struct virtqueue *vq; |
2228 |
|
1939 |
|
|
|
1940 |
sc = txq->vtntx_sc; |
2229 |
vq = txq->vtntx_vq; |
1941 |
vq = txq->vtntx_vq; |
2230 |
|
1942 |
|
2231 |
return (virtqueue_nfree(vq) <= txq->vtntx_intr_threshold); |
1943 |
return (virtqueue_nfree(vq) <= sc->vtnet_tx_intr_thresh); |
2232 |
} |
1944 |
} |
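The threshold logic above keeps the Tx completion interrupt off until only about a quarter of the ring is free, but never lets the threshold drop below the worst-case segment count when indirect descriptors are unavailable. A standalone sketch of the same calculation, with illustrative parameters:

/*
 * vq_size: number of descriptors in the Tx virtqueue.
 * max_tx_segs: largest number of segments one frame may need.
 * have_indirect: nonzero if VIRTIO_RING_F_INDIRECT_DESC was negotiated.
 */
static int
ex_tx_intr_threshold(int vq_size, int max_tx_segs, int have_indirect)
{
        int threshold;

        threshold = vq_size / 4;
        if (!have_indirect && threshold < max_tx_segs)
                threshold = max_tx_segs;
        return (threshold);
}
/* e.g. ex_tx_intr_threshold(256, 35, 0) == 64. */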
2233 |
|
1945 |
|
2234 |
static int |
1946 |
static int |
Lines 2263-2275
|
2263 |
struct virtqueue *vq; |
1975 |
struct virtqueue *vq; |
2264 |
struct vtnet_tx_header *txhdr; |
1976 |
struct vtnet_tx_header *txhdr; |
2265 |
int last; |
1977 |
int last; |
|
|
1978 |
#ifdef DEV_NETMAP |
1979 |
int netmap_bufs = vtnet_netmap_queue_on(txq->vtntx_sc, NR_TX, |
1980 |
txq->vtntx_id); |
1981 |
#else /* !DEV_NETMAP */ |
1982 |
int netmap_bufs = 0; |
1983 |
#endif /* !DEV_NETMAP */ |
2266 |
|
1984 |
|
2267 |
vq = txq->vtntx_vq; |
1985 |
vq = txq->vtntx_vq; |
2268 |
last = 0; |
1986 |
last = 0; |
2269 |
|
1987 |
|
2270 |
while ((txhdr = virtqueue_drain(vq, &last)) != NULL) { |
1988 |
while ((txhdr = virtqueue_drain(vq, &last)) != NULL) { |
2271 |
m_freem(txhdr->vth_mbuf); |
1989 |
if (!netmap_bufs) { |
2272 |
uma_zfree(vtnet_tx_header_zone, txhdr); |
1990 |
m_freem(txhdr->vth_mbuf); |
|
|
1991 |
uma_zfree(vtnet_tx_header_zone, txhdr); |
1992 |
} |
2273 |
} |
1993 |
} |
2274 |
|
1994 |
|
2275 |
KASSERT(virtqueue_empty(vq), |
1995 |
KASSERT(virtqueue_empty(vq), |
Lines 2277-2287
|
2277 |
} |
1997 |
} |
2278 |
|
1998 |
|
2279 |
/* |
1999 |
/* |
2280 |
* BMV: This can go away once we finally have offsets in the mbuf header. |
2000 |
* BMV: Much of this can go away once we finally have offsets in |
|
|
2001 |
* the mbuf packet header. Bug andre@. |
2281 |
*/ |
2002 |
*/ |
2282 |
static int |
2003 |
static int |
2283 |
vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m, int *etype, |
2004 |
vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m, |
2284 |
int *proto, int *start) |
2005 |
int *etype, int *proto, int *start) |
2285 |
{ |
2006 |
{ |
2286 |
struct vtnet_softc *sc; |
2007 |
struct vtnet_softc *sc; |
2287 |
struct ether_vlan_header *evh; |
2008 |
struct ether_vlan_header *evh; |
Lines 2325-2331
|
2325 |
break; |
2046 |
break; |
2326 |
#endif |
2047 |
#endif |
2327 |
default: |
2048 |
default: |
2328 |
sc->vtnet_stats.tx_csum_unknown_ethtype++; |
2049 |
sc->vtnet_stats.tx_csum_bad_ethtype++; |
2329 |
return (EINVAL); |
2050 |
return (EINVAL); |
2330 |
} |
2051 |
} |
2331 |
|
2052 |
|
Lines 2333-2339
|
2333 |
} |
2054 |
} |
2334 |
|
2055 |
|
2335 |
static int |
2056 |
static int |
2336 |
vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int flags, |
2057 |
vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type, |
2337 |
int offset, struct virtio_net_hdr *hdr) |
2058 |
int offset, struct virtio_net_hdr *hdr) |
2338 |
{ |
2059 |
{ |
2339 |
static struct timeval lastecn; |
2060 |
static struct timeval lastecn; |
Lines 2349-2365
|
2349 |
} else |
2070 |
} else |
2350 |
tcp = (struct tcphdr *)(m->m_data + offset); |
2071 |
tcp = (struct tcphdr *)(m->m_data + offset); |
2351 |
|
2072 |
|
2352 |
hdr->hdr_len = vtnet_gtoh16(sc, offset + (tcp->th_off << 2)); |
2073 |
hdr->hdr_len = offset + (tcp->th_off << 2); |
2353 |
hdr->gso_size = vtnet_gtoh16(sc, m->m_pkthdr.tso_segsz); |
2074 |
hdr->gso_size = m->m_pkthdr.tso_segsz; |
2354 |
hdr->gso_type = (flags & CSUM_IP_TSO) ? |
2075 |
hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 : |
2355 |
VIRTIO_NET_HDR_GSO_TCPV4 : VIRTIO_NET_HDR_GSO_TCPV6; |
2076 |
VIRTIO_NET_HDR_GSO_TCPV6; |
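For TSO, the VirtIO header only carries the replicated header length and the payload size per segment; the host derives everything else. A standalone sketch of the two assignments above, with illustrative names (offset is the Ethernet + IP header length, tcp_hlen is th_off << 2, tso_segsz is the payload carried per segment):

static void
ex_tso_header(unsigned offset, unsigned tcp_hlen, unsigned tso_segsz,
    unsigned *hdr_len, unsigned *gso_size)
{
        *hdr_len = offset + tcp_hlen;   /* headers replicated in every segment */
        *gso_size = tso_segsz;          /* TCP payload carried per segment */
}
/* The host then splits the frame into roughly payload_len / gso_size segments. */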
2356 |
|
2077 |
|
2357 |
if (__predict_false(tcp->th_flags & TH_CWR)) { |
2078 |
if (tcp->th_flags & TH_CWR) { |
2358 |
/* |
2079 |
/* |
2359 |
* Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In |
2080 |
* Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD, |
2360 |
* FreeBSD, ECN support is not on a per-interface basis, |
2081 |
* ECN support is not on a per-interface basis, but globally via |
2361 |
* but globally via the net.inet.tcp.ecn.enable sysctl |
2082 |
* the net.inet.tcp.ecn.enable sysctl knob. The default is off. |
2362 |
* knob. The default is off. |
|
|
2363 |
*/ |
2083 |
*/ |
2364 |
if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) { |
2084 |
if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) { |
2365 |
if (ppsratecheck(&lastecn, &curecn, 1)) |
2085 |
if (ppsratecheck(&lastecn, &curecn, 1)) |
Lines 2389-2424
|
2389 |
if (error) |
2109 |
if (error) |
2390 |
goto drop; |
2110 |
goto drop; |
2391 |
|
2111 |
|
2392 |
if (flags & (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6)) { |
2112 |
if ((etype == ETHERTYPE_IP && flags & VTNET_CSUM_OFFLOAD) || |
2393 |
/* Sanity check the parsed mbuf matches the offload flags. */ |
2113 |
(etype == ETHERTYPE_IPV6 && flags & VTNET_CSUM_OFFLOAD_IPV6)) { |
2394 |
if (__predict_false((flags & VTNET_CSUM_OFFLOAD && |
2114 |
/* |
2395 |
etype != ETHERTYPE_IP) || (flags & VTNET_CSUM_OFFLOAD_IPV6 |
2115 |
* We could compare the IP protocol vs the CSUM_ flag too, |
2396 |
&& etype != ETHERTYPE_IPV6))) { |
2116 |
* but that really should not be necessary. |
2397 |
sc->vtnet_stats.tx_csum_proto_mismatch++; |
2117 |
*/ |
2398 |
goto drop; |
|
|
2399 |
} |
2400 |
|
2401 |
hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; |
2118 |
hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; |
2402 |
hdr->csum_start = vtnet_gtoh16(sc, csum_start); |
2119 |
hdr->csum_start = csum_start; |
2403 |
hdr->csum_offset = vtnet_gtoh16(sc, m->m_pkthdr.csum_data); |
2120 |
hdr->csum_offset = m->m_pkthdr.csum_data; |
2404 |
txq->vtntx_stats.vtxs_csum++; |
2121 |
txq->vtntx_stats.vtxs_csum++; |
2405 |
} |
2122 |
} |
2406 |
|
2123 |
|
2407 |
if (flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) { |
2124 |
if (flags & CSUM_TSO) { |
2408 |
/* |
|
|
2409 |
* Sanity check the parsed mbuf IP protocol is TCP, and |
2410 |
* VirtIO TSO requires the checksum offloading above. |
2411 |
*/ |
2412 |
if (__predict_false(proto != IPPROTO_TCP)) { |
2125 |
if (__predict_false(proto != IPPROTO_TCP)) { |
|
|
2126 |
/* Likely failed to correctly parse the mbuf. */ |
2413 |
sc->vtnet_stats.tx_tso_not_tcp++; |
2127 |
sc->vtnet_stats.tx_tso_not_tcp++; |
2414 |
goto drop; |
2128 |
goto drop; |
2415 |
} else if (__predict_false((hdr->flags & |
|
|
2416 |
VIRTIO_NET_HDR_F_NEEDS_CSUM) == 0)) { |
2417 |
sc->vtnet_stats.tx_tso_without_csum++; |
2418 |
goto drop; |
2419 |
} |
2129 |
} |
2420 |
|
2130 |
|
2421 |
error = vtnet_txq_offload_tso(txq, m, flags, csum_start, hdr); |
2131 |
KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM, |
|
|
2132 |
("%s: mbuf %p TSO without checksum offload %#x", |
2133 |
__func__, m, flags)); |
2134 |
|
2135 |
error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr); |
2422 |
if (error) |
2136 |
if (error) |
2423 |
goto drop; |
2137 |
goto drop; |
2424 |
} |
2138 |
} |
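The vtnet_gtoh16()-style conversions sprinkled through the offload path exist because VirtIO 1.0 (modern) devices define multi-byte header fields as little-endian, while legacy devices use the guest's native byte order. A standalone sketch of such a helper, assuming FreeBSD's <sys/endian.h>; the name and the modern flag are illustrative, and the reverse direction would use le16toh() the same way:

#include <stdint.h>
#include <sys/endian.h>         /* htole16() on FreeBSD */

/* Convert a 16-bit guest value to the byte order the device expects. */
static uint16_t
ex_to_device16(int modern, uint16_t val)
{
        return (modern ? htole16(val) : val);
}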
Lines 2447-2457
|
2447 |
|
2161 |
|
2448 |
sglist_reset(sg); |
2162 |
sglist_reset(sg); |
2449 |
error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size); |
2163 |
error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size); |
2450 |
if (error != 0 || sg->sg_nseg != 1) { |
2164 |
KASSERT(error == 0 && sg->sg_nseg == 1, |
2451 |
KASSERT(0, ("%s: cannot add header to sglist error %d nseg %d", |
2165 |
("%s: error %d adding header to sglist", __func__, error)); |
2452 |
__func__, error, sg->sg_nseg)); |
|
|
2453 |
goto fail; |
2454 |
} |
2455 |
|
2166 |
|
2456 |
error = sglist_append_mbuf(sg, m); |
2167 |
error = sglist_append_mbuf(sg, m); |
2457 |
if (error) { |
2168 |
if (error) { |
Lines 2499-2507
|
2499 |
} |
2210 |
} |
2500 |
|
2211 |
|
2501 |
/* |
2212 |
/* |
2502 |
* Always use the non-mergeable header, regardless of whether mergeable headers |
2213 |
* Always use the non-mergeable header, regardless if the feature |
2503 |
* were negotiated, because for transmit num_buffers is always zero. |
2214 |
* was negotiated. For transmit, num_buffers is always zero. The |
2504 |
* The vtnet_hdr_size is used to enqueue the right header size segment. |
2215 |
* vtnet_hdr_size is used to enqueue the correct header size. |
2505 |
*/ |
2216 |
*/ |
2506 |
hdr = &txhdr->vth_uhdr.hdr; |
2217 |
hdr = &txhdr->vth_uhdr.hdr; |
2507 |
|
2218 |
|
Lines 2523-2531
|
2523 |
} |
2234 |
} |
2524 |
|
2235 |
|
2525 |
error = vtnet_txq_enqueue_buf(txq, m_head, txhdr); |
2236 |
error = vtnet_txq_enqueue_buf(txq, m_head, txhdr); |
|
|
2237 |
if (error == 0) |
2238 |
return (0); |
2239 |
|
2526 |
fail: |
2240 |
fail: |
2527 |
if (error) |
2241 |
uma_zfree(vtnet_tx_header_zone, txhdr); |
2528 |
uma_zfree(vtnet_tx_header_zone, txhdr); |
|
|
2529 |
|
2242 |
|
2530 |
return (error); |
2243 |
return (error); |
2531 |
} |
2244 |
} |
Lines 2674-2679
|
2674 |
sc = ifp->if_softc; |
2387 |
sc = ifp->if_softc; |
2675 |
npairs = sc->vtnet_act_vq_pairs; |
2388 |
npairs = sc->vtnet_act_vq_pairs; |
2676 |
|
2389 |
|
|
|
2390 |
/* check if flowid is set */ |
2677 |
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) |
2391 |
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) |
2678 |
i = m->m_pkthdr.flowid % npairs; |
2392 |
i = m->m_pkthdr.flowid % npairs; |
2679 |
else |
2393 |
else |
Lines 2763-2775
|
2763 |
deq = 0; |
2477 |
deq = 0; |
2764 |
VTNET_TXQ_LOCK_ASSERT(txq); |
2478 |
VTNET_TXQ_LOCK_ASSERT(txq); |
2765 |
|
2479 |
|
2766 |
#ifdef DEV_NETMAP |
|
|
2767 |
if (netmap_tx_irq(txq->vtntx_sc->vtnet_ifp, txq->vtntx_id)) { |
2768 |
virtqueue_disable_intr(vq); // XXX luigi |
2769 |
return (0); // XXX or 1 ? |
2770 |
} |
2771 |
#endif |
2772 |
|
2773 |
while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) { |
2480 |
while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) { |
2774 |
m = txhdr->vth_mbuf; |
2481 |
m = txhdr->vth_mbuf; |
2775 |
deq++; |
2482 |
deq++; |
Lines 2811-2816
|
2811 |
return; |
2518 |
return; |
2812 |
} |
2519 |
} |
2813 |
|
2520 |
|
|
|
2521 |
#ifdef DEV_NETMAP |
2522 |
if (netmap_tx_irq(ifp, txq->vtntx_id) != NM_IRQ_PASS) |
2523 |
return; |
2524 |
#endif /* DEV_NETMAP */ |
2525 |
|
2814 |
VTNET_TXQ_LOCK(txq); |
2526 |
VTNET_TXQ_LOCK(txq); |
2815 |
|
2527 |
|
2816 |
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { |
2528 |
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { |
Lines 2997-3003
|
2997 |
* Most drivers just ignore the return value - it only fails |
2709 |
* Most drivers just ignore the return value - it only fails |
2998 |
* with ENOMEM so an error is not likely. |
2710 |
* with ENOMEM so an error is not likely. |
2999 |
*/ |
2711 |
*/ |
3000 |
for (i = 0; i < sc->vtnet_req_vq_pairs; i++) { |
2712 |
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { |
3001 |
rxq = &sc->vtnet_rxqs[i]; |
2713 |
rxq = &sc->vtnet_rxqs[i]; |
3002 |
error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET, |
2714 |
error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET, |
3003 |
"%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id); |
2715 |
"%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id); |
Lines 3027-3033
|
3027 |
rxq = &sc->vtnet_rxqs[i]; |
2739 |
rxq = &sc->vtnet_rxqs[i]; |
3028 |
if (rxq->vtnrx_tq != NULL) { |
2740 |
if (rxq->vtnrx_tq != NULL) { |
3029 |
taskqueue_free(rxq->vtnrx_tq); |
2741 |
taskqueue_free(rxq->vtnrx_tq); |
3030 |
rxq->vtnrx_vq = NULL; |
2742 |
rxq->vtnrx_tq = NULL; |
3031 |
} |
2743 |
} |
3032 |
|
2744 |
|
3033 |
txq = &sc->vtnet_txqs[i]; |
2745 |
txq = &sc->vtnet_txqs[i]; |
Lines 3067-3078
|
3067 |
struct vtnet_txq *txq; |
2779 |
struct vtnet_txq *txq; |
3068 |
int i; |
2780 |
int i; |
3069 |
|
2781 |
|
3070 |
#ifdef DEV_NETMAP |
2782 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { |
3071 |
if (nm_native_on(NA(sc->vtnet_ifp))) |
|
|
3072 |
return; |
3073 |
#endif |
3074 |
|
3075 |
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { |
3076 |
rxq = &sc->vtnet_rxqs[i]; |
2783 |
rxq = &sc->vtnet_rxqs[i]; |
3077 |
vtnet_rxq_free_mbufs(rxq); |
2784 |
vtnet_rxq_free_mbufs(rxq); |
3078 |
|
2785 |
|
Lines 3088-3100
|
3088 |
struct vtnet_txq *txq; |
2795 |
struct vtnet_txq *txq; |
3089 |
int i; |
2796 |
int i; |
3090 |
|
2797 |
|
3091 |
VTNET_CORE_LOCK_ASSERT(sc); |
|
|
3092 |
|
3093 |
/* |
2798 |
/* |
3094 |
* Lock and unlock the per-queue mutex so we know the stop |
2799 |
* Lock and unlock the per-queue mutex so we know the stop |
3095 |
* state is visible. Doing only the active queues should be |
2800 |
* state is visible. Doing only the active queues should be |
3096 |
* sufficient, but it does not cost much extra to do all the |
2801 |
* sufficient, but it does not cost much extra to do all the |
3097 |
* queues. |
2802 |
* queues. Note we hold the core mutex here too. |
3098 |
*/ |
2803 |
*/ |
3099 |
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { |
2804 |
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { |
3100 |
rxq = &sc->vtnet_rxqs[i]; |
2805 |
rxq = &sc->vtnet_rxqs[i]; |
Lines 3133-3140
|
3133 |
virtio_stop(dev); |
2838 |
virtio_stop(dev); |
3134 |
vtnet_stop_rendezvous(sc); |
2839 |
vtnet_stop_rendezvous(sc); |
3135 |
|
2840 |
|
|
|
2841 |
/* Free any mbufs left in the virtqueues. */ |
3136 |
vtnet_drain_rxtx_queues(sc); |
2842 |
vtnet_drain_rxtx_queues(sc); |
3137 |
sc->vtnet_act_vq_pairs = 1; |
|
|
3138 |
} |
2843 |
} |
3139 |
|
2844 |
|
3140 |
static int |
2845 |
static int |
Lines 3143-3179
|
3143 |
device_t dev; |
2848 |
device_t dev; |
3144 |
struct ifnet *ifp; |
2849 |
struct ifnet *ifp; |
3145 |
uint64_t features; |
2850 |
uint64_t features; |
3146 |
int error; |
2851 |
int mask, error; |
3147 |
|
2852 |
|
3148 |
dev = sc->vtnet_dev; |
2853 |
dev = sc->vtnet_dev; |
3149 |
ifp = sc->vtnet_ifp; |
2854 |
ifp = sc->vtnet_ifp; |
3150 |
features = sc->vtnet_negotiated_features; |
2855 |
features = sc->vtnet_features; |
3151 |
|
2856 |
|
|
|
2857 |
mask = 0; |
2858 |
#if defined(INET) |
2859 |
mask |= IFCAP_RXCSUM; |
2860 |
#endif |
2861 |
#if defined (INET6) |
2862 |
mask |= IFCAP_RXCSUM_IPV6; |
2863 |
#endif |
2864 |
|
3152 |
/* |
2865 |
/* |
3153 |
* Re-negotiate with the host, removing any disabled receive |
2866 |
* Re-negotiate with the host, removing any disabled receive |
3154 |
* features. Transmit features are disabled only on our side |
2867 |
* features. Transmit features are disabled only on our side |
3155 |
* via if_capenable and if_hwassist. |
2868 |
* via if_capenable and if_hwassist. |
3156 |
*/ |
2869 |
*/ |
3157 |
|
2870 |
|
3158 |
if ((ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) == 0) |
2871 |
if (ifp->if_capabilities & mask) { |
3159 |
features &= ~(VIRTIO_NET_F_GUEST_CSUM | VTNET_LRO_FEATURES); |
2872 |
/* |
|
|
2873 |
* We require both IPv4 and IPv6 offloading to be enabled |
2874 |
* in order to negotiate it: VirtIO does not distinguish |
2875 |
* between the two. |
2876 |
*/ |
2877 |
if ((ifp->if_capenable & mask) != mask) |
2878 |
features &= ~VIRTIO_NET_F_GUEST_CSUM; |
2879 |
} |
3160 |
|
2880 |
|
3161 |
if ((ifp->if_capenable & IFCAP_LRO) == 0) |
2881 |
if (ifp->if_capabilities & IFCAP_LRO) { |
3162 |
features &= ~VTNET_LRO_FEATURES; |
2882 |
if ((ifp->if_capenable & IFCAP_LRO) == 0) |
|
|
2883 |
features &= ~VTNET_LRO_FEATURES; |
2884 |
} |
3163 |
|
2885 |
|
3164 |
if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) |
2886 |
if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) { |
3165 |
features &= ~VIRTIO_NET_F_CTRL_VLAN; |
2887 |
if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) |
|
|
2888 |
features &= ~VIRTIO_NET_F_CTRL_VLAN; |
2889 |
} |
3166 |
|
2890 |
|
3167 |
error = virtio_reinit(dev, features); |
2891 |
error = virtio_reinit(dev, features); |
3168 |
if (error) { |
2892 |
if (error) |
3169 |
device_printf(dev, "virtio reinit error %d\n", error); |
2893 |
device_printf(dev, "virtio reinit error %d\n", error); |
3170 |
return (error); |
|
|
3171 |
} |
3172 |
|
2894 |
|
3173 |
sc->vtnet_features = features; |
2895 |
return (error); |
3174 |
virtio_reinit_complete(dev); |
|
|
3175 |
|
3176 |
return (0); |
3177 |
} |
2896 |
} |
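Re-negotiation above simply strips receive-side feature bits whose corresponding interface capabilities were switched off before handing the mask back to virtio_reinit(). A standalone sketch of that masking, with placeholder bit values rather than the real VIRTIO_NET_F_* constants:

#include <stdint.h>
#include <stdbool.h>

#define EX_F_GUEST_CSUM  (1ull << 0)    /* placeholder feature bits */
#define EX_F_LRO         (1ull << 1)
#define EX_F_CTRL_VLAN   (1ull << 2)

static uint64_t
ex_trim_features(uint64_t features, bool rxcsum_on, bool lro_on, bool vlan_filter_on)
{
        if (!rxcsum_on)
                features &= ~(EX_F_GUEST_CSUM | EX_F_LRO);  /* LRO depends on Rx csum */
        if (!lro_on)
                features &= ~EX_F_LRO;
        if (!vlan_filter_on)
                features &= ~EX_F_CTRL_VLAN;
        return (features);
}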
3178 |
|
2897 |
|
3179 |
static void |
2898 |
static void |
Lines 3184-3190
Link Here
|
3184 |
ifp = sc->vtnet_ifp; |
2903 |
ifp = sc->vtnet_ifp; |
3185 |
|
2904 |
|
3186 |
if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { |
2905 |
if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { |
|
|
2906 |
/* Restore promiscuous and all-multicast modes. */ |
3187 |
vtnet_rx_filter(sc); |
2907 |
vtnet_rx_filter(sc); |
|
|
2908 |
/* Restore filtered MAC addresses. */ |
3188 |
vtnet_rx_filter_mac(sc); |
2909 |
vtnet_rx_filter_mac(sc); |
3189 |
} |
2910 |
} |
3190 |
|
2911 |
|
Lines 3196-3225
|
3196 |
vtnet_init_rx_queues(struct vtnet_softc *sc) |
2917 |
vtnet_init_rx_queues(struct vtnet_softc *sc) |
3197 |
{ |
2918 |
{ |
3198 |
device_t dev; |
2919 |
device_t dev; |
3199 |
struct ifnet *ifp; |
|
|
3200 |
struct vtnet_rxq *rxq; |
2920 |
struct vtnet_rxq *rxq; |
3201 |
int i, clustersz, error; |
2921 |
int i, clsize, error; |
3202 |
|
2922 |
|
3203 |
dev = sc->vtnet_dev; |
2923 |
dev = sc->vtnet_dev; |
3204 |
ifp = sc->vtnet_ifp; |
|
|
3205 |
|
2924 |
|
3206 |
clustersz = vtnet_rx_cluster_size(sc, ifp->if_mtu); |
2925 |
/* |
3207 |
sc->vtnet_rx_clustersz = clustersz; |
2926 |
* Use the new cluster size if one has been set (via an MTU |
3208 |
|
2927 |
* change). Otherwise, use the standard 2K clusters. |
3209 |
if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) { |
2928 |
* |
3210 |
sc->vtnet_rx_nmbufs = howmany(sizeof(struct vtnet_rx_header) + |
2929 |
* BMV: It might make sense to use page sized clusters as |
3211 |
VTNET_MAX_RX_SIZE, clustersz); |
2930 |
* the default (depending on the features negotiated). |
3212 |
KASSERT(sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs, |
2931 |
*/ |
3213 |
("%s: too many rx mbufs %d for %d segments", __func__, |
2932 |
if (sc->vtnet_rx_new_clsize != 0) { |
3214 |
sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs)); |
2933 |
clsize = sc->vtnet_rx_new_clsize; |
|
|
2934 |
sc->vtnet_rx_new_clsize = 0; |
3215 |
} else |
2935 |
} else |
3216 |
sc->vtnet_rx_nmbufs = 1; |
2936 |
clsize = MCLBYTES; |
3217 |
|
2937 |
|
3218 |
#ifdef DEV_NETMAP |
2938 |
sc->vtnet_rx_clsize = clsize; |
3219 |
if (vtnet_netmap_init_rx_buffers(sc)) |
2939 |
sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize); |
3220 |
return (0); |
|
|
3221 |
#endif |
3222 |
|
2940 |
|
|
|
2941 |
KASSERT(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS || |
2942 |
sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs, |
2943 |
("%s: too many rx mbufs %d for %d segments", __func__, |
2944 |
sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs)); |
2945 |
|
3223 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { |
2946 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { |
3224 |
rxq = &sc->vtnet_rxqs[i]; |
2947 |
rxq = &sc->vtnet_rxqs[i]; |
3225 |
|
2948 |
|
Lines 3229-3235
|
3229 |
VTNET_RXQ_UNLOCK(rxq); |
2952 |
VTNET_RXQ_UNLOCK(rxq); |
3230 |
|
2953 |
|
3231 |
if (error) { |
2954 |
if (error) { |
3232 |
device_printf(dev, "cannot populate Rx queue %d\n", i); |
2955 |
device_printf(dev, |
|
|
2956 |
"cannot allocate mbufs for Rx queue %d\n", i); |
3233 |
return (error); |
2957 |
return (error); |
3234 |
} |
2958 |
} |
3235 |
} |
2959 |
} |
Lines 3246-3252
|
3246 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { |
2970 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { |
3247 |
txq = &sc->vtnet_txqs[i]; |
2971 |
txq = &sc->vtnet_txqs[i]; |
3248 |
txq->vtntx_watchdog = 0; |
2972 |
txq->vtntx_watchdog = 0; |
3249 |
txq->vtntx_intr_threshold = vtnet_txq_intr_threshold(txq); |
|
|
3250 |
} |
2973 |
} |
3251 |
|
2974 |
|
3252 |
return (0); |
2975 |
return (0); |
Lines 3276-3360
|
3276 |
|
2999 |
|
3277 |
dev = sc->vtnet_dev; |
3000 |
dev = sc->vtnet_dev; |
3278 |
|
3001 |
|
3279 |
if ((sc->vtnet_flags & VTNET_FLAG_MQ) == 0) { |
3002 |
if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) { |
3280 |
sc->vtnet_act_vq_pairs = 1; |
3003 |
sc->vtnet_act_vq_pairs = 1; |
3281 |
return; |
3004 |
return; |
3282 |
} |
3005 |
} |
3283 |
|
3006 |
|
3284 |
npairs = sc->vtnet_req_vq_pairs; |
3007 |
npairs = sc->vtnet_requested_vq_pairs; |
3285 |
|
3008 |
|
3286 |
if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) { |
3009 |
if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) { |
3287 |
device_printf(dev, "cannot set active queue pairs to %d, " |
3010 |
device_printf(dev, |
3288 |
"falling back to 1 queue pair\n", npairs); |
3011 |
"cannot set active queue pairs to %d\n", npairs); |
3289 |
npairs = 1; |
3012 |
npairs = 1; |
3290 |
} |
3013 |
} |
3291 |
|
3014 |
|
3292 |
sc->vtnet_act_vq_pairs = npairs; |
3015 |
sc->vtnet_act_vq_pairs = npairs; |
3293 |
} |
3016 |
} |
3294 |
|
3017 |
|
3295 |
static void |
|
|
3296 |
vtnet_update_rx_offloads(struct vtnet_softc *sc) |
3297 |
{ |
3298 |
struct ifnet *ifp; |
3299 |
uint64_t features; |
3300 |
int error; |
3301 |
|
3302 |
ifp = sc->vtnet_ifp; |
3303 |
features = sc->vtnet_features; |
3304 |
|
3305 |
VTNET_CORE_LOCK_ASSERT(sc); |
3306 |
|
3307 |
if (ifp->if_capabilities & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) { |
3308 |
if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) |
3309 |
features |= VIRTIO_NET_F_GUEST_CSUM; |
3310 |
else |
3311 |
features &= ~VIRTIO_NET_F_GUEST_CSUM; |
3312 |
} |
3313 |
|
3314 |
if (ifp->if_capabilities & IFCAP_LRO && !vtnet_software_lro(sc)) { |
3315 |
if (ifp->if_capenable & IFCAP_LRO) |
3316 |
features |= VTNET_LRO_FEATURES; |
3317 |
else |
3318 |
features &= ~VTNET_LRO_FEATURES; |
3319 |
} |
3320 |
|
3321 |
error = vtnet_ctrl_guest_offloads(sc, |
3322 |
features & (VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4 | |
3323 |
VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN | |
3324 |
VIRTIO_NET_F_GUEST_UFO)); |
3325 |
if (error) { |
3326 |
device_printf(sc->vtnet_dev, |
3327 |
"%s: cannot update Rx features\n", __func__); |
3328 |
if (ifp->if_drv_flags & IFF_DRV_RUNNING) { |
3329 |
ifp->if_drv_flags &= ~IFF_DRV_RUNNING; |
3330 |
vtnet_init_locked(sc); |
3331 |
} |
3332 |
} else |
3333 |
sc->vtnet_features = features; |
3334 |
} |
3335 |
|
3336 |
static int |
3018 |
static int |
3337 |
vtnet_reinit(struct vtnet_softc *sc) |
3019 |
vtnet_reinit(struct vtnet_softc *sc) |
3338 |
{ |
3020 |
{ |
3339 |
device_t dev; |
|
|
3340 |
struct ifnet *ifp; |
3021 |
struct ifnet *ifp; |
3341 |
int error; |
3022 |
int error; |
3342 |
|
3023 |
|
3343 |
dev = sc->vtnet_dev; |
|
|
3344 |
ifp = sc->vtnet_ifp; |
3024 |
ifp = sc->vtnet_ifp; |
3345 |
|
3025 |
|
|
|
3026 |
/* Use the current MAC address. */ |
3346 |
bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN); |
3027 |
bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN); |
|
|
3028 |
vtnet_set_hwaddr(sc); |
3347 |
|
3029 |
|
3348 |
error = vtnet_virtio_reinit(sc); |
|
|
3349 |
if (error) |
3350 |
return (error); |
3351 |
|
3352 |
vtnet_set_macaddr(sc); |
3353 |
vtnet_set_active_vq_pairs(sc); |
3030 |
vtnet_set_active_vq_pairs(sc); |
3354 |
|
3031 |
|
3355 |
if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) |
|
|
3356 |
vtnet_init_rx_filters(sc); |
3357 |
|
3358 |
ifp->if_hwassist = 0; |
3032 |
ifp->if_hwassist = 0; |
3359 |
if (ifp->if_capenable & IFCAP_TXCSUM) |
3033 |
if (ifp->if_capenable & IFCAP_TXCSUM) |
3360 |
ifp->if_hwassist |= VTNET_CSUM_OFFLOAD; |
3034 |
ifp->if_hwassist |= VTNET_CSUM_OFFLOAD; |
Lines 3365-3374
Link Here
|
3365 |
if (ifp->if_capenable & IFCAP_TSO6) |
3039 |
if (ifp->if_capenable & IFCAP_TSO6) |
3366 |
ifp->if_hwassist |= CSUM_IP6_TSO; |
3040 |
ifp->if_hwassist |= CSUM_IP6_TSO; |
3367 |
|
3041 |
|
|
|
3042 |
if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) |
3043 |
vtnet_init_rx_filters(sc); |
3044 |
|
3368 |
error = vtnet_init_rxtx_queues(sc); |
3045 |
error = vtnet_init_rxtx_queues(sc); |
3369 |
if (error) |
3046 |
if (error) |
3370 |
return (error); |
3047 |
return (error); |
3371 |
|
3048 |
|
|
|
3049 |
vtnet_enable_interrupts(sc); |
3050 |
ifp->if_drv_flags |= IFF_DRV_RUNNING; |
3051 |
|
3372 |
return (0); |
3052 |
return (0); |
3373 |
} |
3053 |
} |
3374 |
|
3054 |
|
Lines 3388-3402
Link Here
|
3388 |
|
3068 |
|
3389 |
vtnet_stop(sc); |
3069 |
vtnet_stop(sc); |
3390 |
|
3070 |
|
3391 |
if (vtnet_reinit(sc) != 0) { |
3071 |
/* Reinitialize with the host. */ |
3392 |
vtnet_stop(sc); |
3072 |
if (vtnet_virtio_reinit(sc) != 0) |
3393 |
return; |
3073 |
goto fail; |
3394 |
} |
|
|
3395 |
|
3074 |
|
3396 |
ifp->if_drv_flags |= IFF_DRV_RUNNING; |
3075 |
if (vtnet_reinit(sc) != 0) |
|
|
3076 |
goto fail; |
3077 |
|
3078 |
virtio_reinit_complete(dev); |
3079 |
|
3397 |
vtnet_update_link_status(sc); |
3080 |
vtnet_update_link_status(sc); |
3398 |
vtnet_enable_interrupts(sc); |
|
|
3399 |
callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); |
3081 |
callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); |
|
|
3082 |
|
3083 |
return; |
3084 |
|
3085 |
fail: |
3086 |
vtnet_stop(sc); |
3400 |
} |
3087 |
} |
3401 |
|
3088 |
|
3402 |
static void |
3089 |
static void |
Lines 3406-3418
Link Here
|
3406 |
|
3093 |
|
3407 |
sc = xsc; |
3094 |
sc = xsc; |
3408 |
|
3095 |
|
3409 |
#ifdef DEV_NETMAP |
|
|
3410 |
if (!NA(sc->vtnet_ifp)) { |
3411 |
D("try to attach again"); |
3412 |
vtnet_netmap_attach(sc); |
3413 |
} |
3414 |
#endif |
3415 |
|
3416 |
VTNET_CORE_LOCK(sc); |
3096 |
VTNET_CORE_LOCK(sc); |
3417 |
vtnet_init_locked(sc); |
3097 |
vtnet_init_locked(sc); |
3418 |
VTNET_CORE_UNLOCK(sc); |
3098 |
VTNET_CORE_UNLOCK(sc); |
Lines 3421-3433
Link Here
|
3421 |
static void |
3101 |
static void |
3422 |
vtnet_free_ctrl_vq(struct vtnet_softc *sc) |
3102 |
vtnet_free_ctrl_vq(struct vtnet_softc *sc) |
3423 |
{ |
3103 |
{ |
|
|
3104 |
struct virtqueue *vq; |
3424 |
|
3105 |
|
|
|
3106 |
vq = sc->vtnet_ctrl_vq; |
3107 |
|
3425 |
/* |
3108 |
/* |
3426 |
* The control virtqueue is only polled and therefore it should |
3109 |
* The control virtqueue is only polled and therefore it should |
3427 |
* already be empty. |
3110 |
* already be empty. |
3428 |
*/ |
3111 |
*/ |
3429 |
KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq), |
3112 |
KASSERT(virtqueue_empty(vq), |
3430 |
("%s: ctrl vq %p not empty", __func__, sc->vtnet_ctrl_vq)); |
3113 |
("%s: ctrl vq %p not empty", __func__, vq)); |
3431 |
} |
3114 |
} |
3432 |
|
3115 |
|
3433 |
static void |
3116 |
static void |
Lines 3438-3525
Link Here
|
3438 |
|
3121 |
|
3439 |
vq = sc->vtnet_ctrl_vq; |
3122 |
vq = sc->vtnet_ctrl_vq; |
3440 |
|
3123 |
|
3441 |
MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ); |
|
|
3442 |
VTNET_CORE_LOCK_ASSERT(sc); |
3124 |
VTNET_CORE_LOCK_ASSERT(sc); |
|
|
3125 |
KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ, |
3126 |
("%s: CTRL_VQ feature not negotiated", __func__)); |
3443 |
|
3127 |
|
3444 |
if (!virtqueue_empty(vq)) |
3128 |
if (!virtqueue_empty(vq)) |
3445 |
return; |
3129 |
return; |
|
|
3130 |
if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0) |
3131 |
return; |
3446 |
|
3132 |
|
3447 |
/* |
3133 |
/* |
3448 |
* Poll for the response, but the command is likely completed before |
3134 |
* Poll for the response, but the command is likely already |
3449 |
* returning from the notify. |
3135 |
* done when we return from the notify. |
3450 |
*/ |
3136 |
*/ |
3451 |
if (virtqueue_enqueue(vq, cookie, sg, readable, writable) == 0) { |
3137 |
virtqueue_notify(vq); |
3452 |
virtqueue_notify(vq); |
3138 |
virtqueue_poll(vq, NULL); |
3453 |
virtqueue_poll(vq, NULL); |
|
|
3454 |
} |
3455 |
} |
3139 |
} |
3456 |
|
3140 |
|
3457 |
static int |
3141 |
static int |
3458 |
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr) |
3142 |
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr) |
3459 |
{ |
3143 |
{ |
|
|
3144 |
struct virtio_net_ctrl_hdr hdr __aligned(2); |
3460 |
struct sglist_seg segs[3]; |
3145 |
struct sglist_seg segs[3]; |
3461 |
struct sglist sg; |
3146 |
struct sglist sg; |
3462 |
struct { |
3147 |
uint8_t ack; |
3463 |
struct virtio_net_ctrl_hdr hdr __aligned(2); |
|
|
3464 |
uint8_t pad1; |
3465 |
uint8_t addr[ETHER_ADDR_LEN] __aligned(8); |
3466 |
uint8_t pad2; |
3467 |
uint8_t ack; |
3468 |
} s; |
3469 |
int error; |
3148 |
int error; |
3470 |
|
3149 |
|
3471 |
error = 0; |
3150 |
hdr.class = VIRTIO_NET_CTRL_MAC; |
3472 |
MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_MAC); |
3151 |
hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET; |
|
|
3152 |
ack = VIRTIO_NET_ERR; |
3473 |
|
3153 |
|
3474 |
s.hdr.class = VIRTIO_NET_CTRL_MAC; |
3154 |
sglist_init(&sg, 3, segs); |
3475 |
s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET; |
|
|
3476 |
bcopy(hwaddr, &s.addr[0], ETHER_ADDR_LEN); |
3477 |
s.ack = VIRTIO_NET_ERR; |
3478 |
|
3479 |
sglist_init(&sg, nitems(segs), segs); |
3480 |
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3481 |
error |= sglist_append(&sg, &s.addr[0], ETHER_ADDR_LEN); |
3482 |
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); |
3483 |
MPASS(error == 0 && sg.sg_nseg == nitems(segs)); |
3484 |
|
3485 |
if (error == 0) |
3486 |
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); |
3487 |
|
3488 |
return (s.ack == VIRTIO_NET_OK ? 0 : EIO); |
3489 |
} |
3490 |
|
3491 |
static int |
3492 |
vtnet_ctrl_guest_offloads(struct vtnet_softc *sc, uint64_t offloads) |
3493 |
{ |
3494 |
struct sglist_seg segs[3]; |
3495 |
struct sglist sg; |
3496 |
struct { |
3497 |
struct virtio_net_ctrl_hdr hdr __aligned(2); |
3498 |
uint8_t pad1; |
3499 |
uint64_t offloads __aligned(8); |
3500 |
uint8_t pad2; |
3501 |
uint8_t ack; |
3502 |
} s; |
3503 |
int error; |
3504 |
|
3505 |
error = 0; |
3155 |
error = 0; |
3506 |
MPASS(sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS); |
3156 |
error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); |
|
|
3157 |
error |= sglist_append(&sg, hwaddr, ETHER_ADDR_LEN); |
3158 |
error |= sglist_append(&sg, &ack, sizeof(uint8_t)); |
3159 |
KASSERT(error == 0 && sg.sg_nseg == 3, |
3160 |
("%s: error %d adding set MAC msg to sglist", __func__, error)); |
3507 |
|
3161 |
|
3508 |
s.hdr.class = VIRTIO_NET_CTRL_GUEST_OFFLOADS; |
3162 |
vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); |
3509 |
s.hdr.cmd = VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET; |
|
|
3510 |
s.offloads = vtnet_gtoh64(sc, offloads); |
3511 |
s.ack = VIRTIO_NET_ERR; |
3512 |
|
3163 |
|
3513 |
sglist_init(&sg, nitems(segs), segs); |
3164 |
return (ack == VIRTIO_NET_OK ? 0 : EIO); |
3514 |
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); |
|
|
3515 |
error |= sglist_append(&sg, &s.offloads, sizeof(uint64_t)); |
3516 |
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); |
3517 |
MPASS(error == 0 && sg.sg_nseg == nitems(segs)); |
3518 |
|
3519 |
if (error == 0) |
3520 |
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); |
3521 |
|
3522 |
return (s.ack == VIRTIO_NET_OK ? 0 : EIO); |
3523 |
} |
3165 |
} |
3524 |
|
3166 |
|
3525 |
static int |
3167 |
static int |
Lines 3528-3591
Link Here
|
3528 |
struct sglist_seg segs[3]; |
3170 |
struct sglist_seg segs[3]; |
3529 |
struct sglist sg; |
3171 |
struct sglist sg; |
3530 |
struct { |
3172 |
struct { |
3531 |
struct virtio_net_ctrl_hdr hdr __aligned(2); |
3173 |
struct virtio_net_ctrl_hdr hdr; |
3532 |
uint8_t pad1; |
3174 |
uint8_t pad1; |
3533 |
struct virtio_net_ctrl_mq mq __aligned(2); |
3175 |
struct virtio_net_ctrl_mq mq; |
3534 |
uint8_t pad2; |
3176 |
uint8_t pad2; |
3535 |
uint8_t ack; |
3177 |
uint8_t ack; |
3536 |
} s; |
3178 |
} s __aligned(2); |
3537 |
int error; |
3179 |
int error; |
3538 |
|
3180 |
|
3539 |
error = 0; |
|
|
3540 |
MPASS(sc->vtnet_flags & VTNET_FLAG_MQ); |
3541 |
|
3542 |
s.hdr.class = VIRTIO_NET_CTRL_MQ; |
3181 |
s.hdr.class = VIRTIO_NET_CTRL_MQ; |
3543 |
s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET; |
3182 |
s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET; |
3544 |
s.mq.virtqueue_pairs = vtnet_gtoh16(sc, npairs); |
3183 |
s.mq.virtqueue_pairs = npairs; |
3545 |
s.ack = VIRTIO_NET_ERR; |
3184 |
s.ack = VIRTIO_NET_ERR; |
3546 |
|
3185 |
|
3547 |
sglist_init(&sg, nitems(segs), segs); |
3186 |
sglist_init(&sg, 3, segs); |
|
|
3187 |
error = 0; |
3548 |
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3188 |
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3549 |
error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq)); |
3189 |
error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq)); |
3550 |
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); |
3190 |
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); |
3551 |
MPASS(error == 0 && sg.sg_nseg == nitems(segs)); |
3191 |
KASSERT(error == 0 && sg.sg_nseg == 3, |
|
|
3192 |
("%s: error %d adding MQ message to sglist", __func__, error)); |
3552 |
|
3193 |
|
3553 |
if (error == 0) |
3194 |
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); |
3554 |
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); |
|
|
3555 |
|
3195 |
|
3556 |
return (s.ack == VIRTIO_NET_OK ? 0 : EIO); |
3196 |
return (s.ack == VIRTIO_NET_OK ? 0 : EIO); |
3557 |
} |
3197 |
} |
3558 |
|
3198 |
|
3559 |
static int |
3199 |
static int |
3560 |
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, uint8_t cmd, int on) |
3200 |
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on) |
3561 |
{ |
3201 |
{ |
3562 |
struct sglist_seg segs[3]; |
3202 |
struct sglist_seg segs[3]; |
3563 |
struct sglist sg; |
3203 |
struct sglist sg; |
3564 |
struct { |
3204 |
struct { |
3565 |
struct virtio_net_ctrl_hdr hdr __aligned(2); |
3205 |
struct virtio_net_ctrl_hdr hdr; |
3566 |
uint8_t pad1; |
3206 |
uint8_t pad1; |
3567 |
uint8_t onoff; |
3207 |
uint8_t onoff; |
3568 |
uint8_t pad2; |
3208 |
uint8_t pad2; |
3569 |
uint8_t ack; |
3209 |
uint8_t ack; |
3570 |
} s; |
3210 |
} s __aligned(2); |
3571 |
int error; |
3211 |
int error; |
3572 |
|
3212 |
|
3573 |
error = 0; |
3213 |
KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, |
3574 |
MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX); |
3214 |
("%s: CTRL_RX feature not negotiated", __func__)); |
3575 |
|
3215 |
|
3576 |
s.hdr.class = VIRTIO_NET_CTRL_RX; |
3216 |
s.hdr.class = VIRTIO_NET_CTRL_RX; |
3577 |
s.hdr.cmd = cmd; |
3217 |
s.hdr.cmd = cmd; |
3578 |
s.onoff = !!on; |
3218 |
s.onoff = !!on; |
3579 |
s.ack = VIRTIO_NET_ERR; |
3219 |
s.ack = VIRTIO_NET_ERR; |
3580 |
|
3220 |
|
3581 |
sglist_init(&sg, nitems(segs), segs); |
3221 |
sglist_init(&sg, 3, segs); |
|
|
3222 |
error = 0; |
3582 |
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3223 |
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3583 |
error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t)); |
3224 |
error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t)); |
3584 |
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); |
3225 |
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); |
3585 |
MPASS(error == 0 && sg.sg_nseg == nitems(segs)); |
3226 |
KASSERT(error == 0 && sg.sg_nseg == 3, |
|
|
3227 |
("%s: error %d adding Rx message to sglist", __func__, error)); |
3586 |
|
3228 |
|
3587 |
if (error == 0) |
3229 |
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); |
3588 |
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); |
|
|
3589 |
|
3230 |
|
3590 |
return (s.ack == VIRTIO_NET_OK ? 0 : EIO); |
3231 |
return (s.ack == VIRTIO_NET_OK ? 0 : EIO); |
3591 |
} |
3232 |
} |
Lines 3593-3608
Link Here
|
3593 |
static int |
3234 |
static int |
3594 |
vtnet_set_promisc(struct vtnet_softc *sc, int on) |
3235 |
vtnet_set_promisc(struct vtnet_softc *sc, int on) |
3595 |
{ |
3236 |
{ |
|
|
3237 |
|
3596 |
return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on)); |
3238 |
return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on)); |
3597 |
} |
3239 |
} |
3598 |
|
3240 |
|
3599 |
static int |
3241 |
static int |
3600 |
vtnet_set_allmulti(struct vtnet_softc *sc, int on) |
3242 |
vtnet_set_allmulti(struct vtnet_softc *sc, int on) |
3601 |
{ |
3243 |
{ |
|
|
3244 |
|
3602 |
return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on)); |
3245 |
return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on)); |
3603 |
} |
3246 |
} |
3604 |
|
3247 |
|
|
|
3248 |
/* |
3249 |
* The device defaults to promiscuous mode for backwards compatibility. |
3250 |
* Turn it off at attach time if possible. |
3251 |
*/ |
3605 |
static void |
3252 |
static void |
|
|
3253 |
vtnet_attach_disable_promisc(struct vtnet_softc *sc) |
3254 |
{ |
3255 |
struct ifnet *ifp; |
3256 |
|
3257 |
ifp = sc->vtnet_ifp; |
3258 |
|
3259 |
VTNET_CORE_LOCK(sc); |
3260 |
if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) { |
3261 |
ifp->if_flags |= IFF_PROMISC; |
3262 |
} else if (vtnet_set_promisc(sc, 0) != 0) { |
3263 |
ifp->if_flags |= IFF_PROMISC; |
3264 |
device_printf(sc->vtnet_dev, |
3265 |
"cannot disable default promiscuous mode\n"); |
3266 |
} |
3267 |
VTNET_CORE_UNLOCK(sc); |
3268 |
} |
3269 |
|
3270 |
static void |
3606 |
vtnet_rx_filter(struct vtnet_softc *sc) |
3271 |
vtnet_rx_filter(struct vtnet_softc *sc) |
3607 |
{ |
3272 |
{ |
3608 |
device_t dev; |
3273 |
device_t dev; |
Lines 3613-3627
Link Here
|
3613 |
|
3278 |
|
3614 |
VTNET_CORE_LOCK_ASSERT(sc); |
3279 |
VTNET_CORE_LOCK_ASSERT(sc); |
3615 |
|
3280 |
|
3616 |
if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0) { |
3281 |
if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0) |
3617 |
device_printf(dev, "cannot %s promiscuous mode\n", |
3282 |
device_printf(dev, "cannot %s promiscuous mode\n", |
3618 |
ifp->if_flags & IFF_PROMISC ? "enable" : "disable"); |
3283 |
ifp->if_flags & IFF_PROMISC ? "enable" : "disable"); |
3619 |
} |
|
|
3620 |
|
3284 |
|
3621 |
if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0) { |
3285 |
if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0) |
3622 |
device_printf(dev, "cannot %s all-multicast mode\n", |
3286 |
device_printf(dev, "cannot %s all-multicast mode\n", |
3623 |
ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable"); |
3287 |
ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable"); |
3624 |
} |
|
|
3625 |
} |
3288 |
} |
3626 |
|
3289 |
|
3627 |
static void |
3290 |
static void |
Lines 3639-3653
Link Here
|
3639 |
|
3302 |
|
3640 |
ifp = sc->vtnet_ifp; |
3303 |
ifp = sc->vtnet_ifp; |
3641 |
filter = sc->vtnet_mac_filter; |
3304 |
filter = sc->vtnet_mac_filter; |
3642 |
|
|
|
3643 |
ucnt = 0; |
3305 |
ucnt = 0; |
3644 |
mcnt = 0; |
3306 |
mcnt = 0; |
3645 |
promisc = 0; |
3307 |
promisc = 0; |
3646 |
allmulti = 0; |
3308 |
allmulti = 0; |
3647 |
error = 0; |
|
|
3648 |
|
3309 |
|
3649 |
MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX); |
|
|
3650 |
VTNET_CORE_LOCK_ASSERT(sc); |
3310 |
VTNET_CORE_LOCK_ASSERT(sc); |
|
|
3311 |
KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, |
3312 |
("%s: CTRL_RX feature not negotiated", __func__)); |
3651 |
|
3313 |
|
3652 |
/* Unicast MAC addresses: */ |
3314 |
/* Unicast MAC addresses: */ |
3653 |
if_addr_rlock(ifp); |
3315 |
if_addr_rlock(ifp); |
Lines 3668-3673
Link Here
|
3668 |
} |
3330 |
} |
3669 |
if_addr_runlock(ifp); |
3331 |
if_addr_runlock(ifp); |
3670 |
|
3332 |
|
|
|
3333 |
if (promisc != 0) { |
3334 |
filter->vmf_unicast.nentries = 0; |
3335 |
if_printf(ifp, "more than %d MAC addresses assigned, " |
3336 |
"falling back to promiscuous mode\n", |
3337 |
VTNET_MAX_MAC_ENTRIES); |
3338 |
} else |
3339 |
filter->vmf_unicast.nentries = ucnt; |
3340 |
|
3671 |
/* Multicast MAC addresses: */ |
3341 |
/* Multicast MAC addresses: */ |
3672 |
if_maddr_rlock(ifp); |
3342 |
if_maddr_rlock(ifp); |
3673 |
CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { |
3343 |
CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { |
Lines 3684-3723
Link Here
|
3684 |
} |
3354 |
} |
3685 |
if_maddr_runlock(ifp); |
3355 |
if_maddr_runlock(ifp); |
3686 |
|
3356 |
|
3687 |
if (promisc != 0) { |
|
|
3688 |
if_printf(ifp, "cannot filter more than %d MAC addresses, " |
3689 |
"falling back to promiscuous mode\n", |
3690 |
VTNET_MAX_MAC_ENTRIES); |
3691 |
ucnt = 0; |
3692 |
} |
3693 |
if (allmulti != 0) { |
3357 |
if (allmulti != 0) { |
3694 |
if_printf(ifp, "cannot filter more than %d multicast MAC " |
3358 |
filter->vmf_multicast.nentries = 0; |
3695 |
"addresses, falling back to all-multicast mode\n", |
3359 |
if_printf(ifp, "more than %d multicast MAC addresses " |
|
|
3360 |
"assigned, falling back to all-multicast mode\n", |
3696 |
VTNET_MAX_MAC_ENTRIES); |
3361 |
VTNET_MAX_MAC_ENTRIES); |
3697 |
mcnt = 0; |
3362 |
} else |
3698 |
} |
3363 |
filter->vmf_multicast.nentries = mcnt; |
3699 |
|
3364 |
|
3700 |
if (promisc != 0 && allmulti != 0) |
3365 |
if (promisc != 0 && allmulti != 0) |
3701 |
goto out; |
3366 |
goto out; |
3702 |
|
3367 |
|
3703 |
filter->vmf_unicast.nentries = vtnet_gtoh32(sc, ucnt); |
|
|
3704 |
filter->vmf_multicast.nentries = vtnet_gtoh32(sc, mcnt); |
3705 |
|
3706 |
hdr.class = VIRTIO_NET_CTRL_MAC; |
3368 |
hdr.class = VIRTIO_NET_CTRL_MAC; |
3707 |
hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET; |
3369 |
hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET; |
3708 |
ack = VIRTIO_NET_ERR; |
3370 |
ack = VIRTIO_NET_ERR; |
3709 |
|
3371 |
|
3710 |
sglist_init(&sg, nitems(segs), segs); |
3372 |
sglist_init(&sg, 4, segs); |
|
|
3373 |
error = 0; |
3711 |
error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3374 |
error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3712 |
error |= sglist_append(&sg, &filter->vmf_unicast, |
3375 |
error |= sglist_append(&sg, &filter->vmf_unicast, |
3713 |
sizeof(uint32_t) + ucnt * ETHER_ADDR_LEN); |
3376 |
sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN); |
3714 |
error |= sglist_append(&sg, &filter->vmf_multicast, |
3377 |
error |= sglist_append(&sg, &filter->vmf_multicast, |
3715 |
sizeof(uint32_t) + mcnt * ETHER_ADDR_LEN); |
3378 |
sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN); |
3716 |
error |= sglist_append(&sg, &ack, sizeof(uint8_t)); |
3379 |
error |= sglist_append(&sg, &ack, sizeof(uint8_t)); |
3717 |
MPASS(error == 0 && sg.sg_nseg == nitems(segs)); |
3380 |
KASSERT(error == 0 && sg.sg_nseg == 4, |
|
|
3381 |
("%s: error %d adding MAC filter msg to sglist", __func__, error)); |
3718 |
|
3382 |
|
3719 |
if (error == 0) |
3383 |
vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); |
3720 |
vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); |
3384 |
|
3721 |
if (ack != VIRTIO_NET_OK) |
3385 |
if (ack != VIRTIO_NET_OK) |
3722 |
if_printf(ifp, "error setting host MAC filter table\n"); |
3386 |
if_printf(ifp, "error setting host MAC filter table\n"); |
3723 |
|
3387 |
|
Lines 3734-3763
Link Here
|
3734 |
struct sglist_seg segs[3]; |
3398 |
struct sglist_seg segs[3]; |
3735 |
struct sglist sg; |
3399 |
struct sglist sg; |
3736 |
struct { |
3400 |
struct { |
3737 |
struct virtio_net_ctrl_hdr hdr __aligned(2); |
3401 |
struct virtio_net_ctrl_hdr hdr; |
3738 |
uint8_t pad1; |
3402 |
uint8_t pad1; |
3739 |
uint16_t tag __aligned(2); |
3403 |
uint16_t tag; |
3740 |
uint8_t pad2; |
3404 |
uint8_t pad2; |
3741 |
uint8_t ack; |
3405 |
uint8_t ack; |
3742 |
} s; |
3406 |
} s __aligned(2); |
3743 |
int error; |
3407 |
int error; |
3744 |
|
3408 |
|
3745 |
error = 0; |
|
|
3746 |
MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER); |
3747 |
|
3748 |
s.hdr.class = VIRTIO_NET_CTRL_VLAN; |
3409 |
s.hdr.class = VIRTIO_NET_CTRL_VLAN; |
3749 |
s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL; |
3410 |
s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL; |
3750 |
s.tag = vtnet_gtoh16(sc, tag); |
3411 |
s.tag = tag; |
3751 |
s.ack = VIRTIO_NET_ERR; |
3412 |
s.ack = VIRTIO_NET_ERR; |
3752 |
|
3413 |
|
3753 |
sglist_init(&sg, nitems(segs), segs); |
3414 |
sglist_init(&sg, 3, segs); |
|
|
3415 |
error = 0; |
3754 |
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3416 |
error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); |
3755 |
error |= sglist_append(&sg, &s.tag, sizeof(uint16_t)); |
3417 |
error |= sglist_append(&sg, &s.tag, sizeof(uint16_t)); |
3756 |
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); |
3418 |
error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); |
3757 |
MPASS(error == 0 && sg.sg_nseg == nitems(segs)); |
3419 |
KASSERT(error == 0 && sg.sg_nseg == 3, |
|
|
3420 |
("%s: error %d adding VLAN message to sglist", __func__, error)); |
3758 |
|
3421 |
|
3759 |
if (error == 0) |
3422 |
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); |
3760 |
vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); |
|
|
3761 |
|
3423 |
|
3762 |
return (s.ack == VIRTIO_NET_OK ? 0 : EIO); |
3424 |
return (s.ack == VIRTIO_NET_OK ? 0 : EIO); |
3763 |
} |
3425 |
} |
Lines 3765-3776
Link Here
|
3765 |
static void |
3427 |
static void |
3766 |
vtnet_rx_filter_vlan(struct vtnet_softc *sc) |
3428 |
vtnet_rx_filter_vlan(struct vtnet_softc *sc) |
3767 |
{ |
3429 |
{ |
3768 |
int i, bit; |
|
|
3769 |
uint32_t w; |
3430 |
uint32_t w; |
3770 |
uint16_t tag; |
3431 |
uint16_t tag; |
|
|
3432 |
int i, bit; |
3771 |
|
3433 |
|
3772 |
MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER); |
|
|
3773 |
VTNET_CORE_LOCK_ASSERT(sc); |
3434 |
VTNET_CORE_LOCK_ASSERT(sc); |
|
|
3435 |
KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER, |
3436 |
("%s: VLAN_FILTER feature not negotiated", __func__)); |
3774 |
|
3437 |
|
3775 |
/* Enable the filter for each configured VLAN. */ |
3438 |
/* Enable the filter for each configured VLAN. */ |
3776 |
for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) { |
3439 |
for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) { |
Lines 3839-3872
Link Here
|
3839 |
vtnet_update_vlan_filter(arg, 0, tag); |
3502 |
vtnet_update_vlan_filter(arg, 0, tag); |
3840 |
} |
3503 |
} |
3841 |
|
3504 |
|
3842 |
static void |
|
|
3843 |
vtnet_update_speed_duplex(struct vtnet_softc *sc) |
3844 |
{ |
3845 |
struct ifnet *ifp; |
3846 |
uint32_t speed; |
3847 |
|
3848 |
ifp = sc->vtnet_ifp; |
3849 |
|
3850 |
if ((sc->vtnet_features & VIRTIO_NET_F_SPEED_DUPLEX) == 0) |
3851 |
return; |
3852 |
|
3853 |
/* BMV: Ignore duplex. */ |
3854 |
speed = virtio_read_dev_config_4(sc->vtnet_dev, |
3855 |
offsetof(struct virtio_net_config, speed)); |
3856 |
if (speed != -1) |
3857 |
ifp->if_baudrate = IF_Mbps(speed); |
3858 |
} |
3859 |
|
3860 |
static int |
3505 |
static int |
3861 |
vtnet_is_link_up(struct vtnet_softc *sc) |
3506 |
vtnet_is_link_up(struct vtnet_softc *sc) |
3862 |
{ |
3507 |
{ |
|
|
3508 |
device_t dev; |
3509 |
struct ifnet *ifp; |
3863 |
uint16_t status; |
3510 |
uint16_t status; |
3864 |
|
3511 |
|
3865 |
if ((sc->vtnet_features & VIRTIO_NET_F_STATUS) == 0) |
3512 |
dev = sc->vtnet_dev; |
3866 |
return (1); |
3513 |
ifp = sc->vtnet_ifp; |
3867 |
|
3514 |
|
3868 |
status = virtio_read_dev_config_2(sc->vtnet_dev, |
3515 |
if ((ifp->if_capabilities & IFCAP_LINKSTATE) == 0) |
3869 |
offsetof(struct virtio_net_config, status)); |
3516 |
status = VIRTIO_NET_S_LINK_UP; |
|
|
3517 |
else |
3518 |
status = virtio_read_dev_config_2(dev, |
3519 |
offsetof(struct virtio_net_config, status)); |
3870 |
|
3520 |
|
3871 |
return ((status & VIRTIO_NET_S_LINK_UP) != 0); |
3521 |
return ((status & VIRTIO_NET_S_LINK_UP) != 0); |
3872 |
} |
3522 |
} |
Lines 3878-3889
Link Here
|
3878 |
int link; |
3528 |
int link; |
3879 |
|
3529 |
|
3880 |
ifp = sc->vtnet_ifp; |
3530 |
ifp = sc->vtnet_ifp; |
|
|
3531 |
|
3881 |
VTNET_CORE_LOCK_ASSERT(sc); |
3532 |
VTNET_CORE_LOCK_ASSERT(sc); |
3882 |
link = vtnet_is_link_up(sc); |
3533 |
link = vtnet_is_link_up(sc); |
3883 |
|
3534 |
|
3884 |
/* Notify if the link status has changed. */ |
3535 |
/* Notify if the link status has changed. */ |
3885 |
if (link != 0 && sc->vtnet_link_active == 0) { |
3536 |
if (link != 0 && sc->vtnet_link_active == 0) { |
3886 |
vtnet_update_speed_duplex(sc); |
|
|
3887 |
sc->vtnet_link_active = 1; |
3537 |
sc->vtnet_link_active = 1; |
3888 |
if_link_state_change(ifp, LINK_STATE_UP); |
3538 |
if_link_state_change(ifp, LINK_STATE_UP); |
3889 |
} else if (link == 0 && sc->vtnet_link_active != 0) { |
3539 |
} else if (link == 0 && sc->vtnet_link_active != 0) { |
Lines 3895-3901
Link Here
|
3895 |
static int |
3545 |
static int |
3896 |
vtnet_ifmedia_upd(struct ifnet *ifp) |
3546 |
vtnet_ifmedia_upd(struct ifnet *ifp) |
3897 |
{ |
3547 |
{ |
3898 |
return (EOPNOTSUPP); |
3548 |
struct vtnet_softc *sc; |
|
|
3549 |
struct ifmedia *ifm; |
3550 |
|
3551 |
sc = ifp->if_softc; |
3552 |
ifm = &sc->vtnet_media; |
3553 |
|
3554 |
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) |
3555 |
return (EINVAL); |
3556 |
|
3557 |
return (0); |
3899 |
} |
3558 |
} |
3900 |
|
3559 |
|
3901 |
static void |
3560 |
static void |
Lines 3911-3955
Link Here
|
3911 |
VTNET_CORE_LOCK(sc); |
3570 |
VTNET_CORE_LOCK(sc); |
3912 |
if (vtnet_is_link_up(sc) != 0) { |
3571 |
if (vtnet_is_link_up(sc) != 0) { |
3913 |
ifmr->ifm_status |= IFM_ACTIVE; |
3572 |
ifmr->ifm_status |= IFM_ACTIVE; |
3914 |
ifmr->ifm_active |= IFM_10G_T | IFM_FDX; |
3573 |
ifmr->ifm_active |= VTNET_MEDIATYPE; |
3915 |
} else |
3574 |
} else |
3916 |
ifmr->ifm_active |= IFM_NONE; |
3575 |
ifmr->ifm_active |= IFM_NONE; |
3917 |
VTNET_CORE_UNLOCK(sc); |
3576 |
VTNET_CORE_UNLOCK(sc); |
3918 |
} |
3577 |
} |
3919 |
|
3578 |
|
3920 |
static void |
3579 |
static void |
3921 |
vtnet_get_macaddr(struct vtnet_softc *sc) |
3580 |
vtnet_set_hwaddr(struct vtnet_softc *sc) |
3922 |
{ |
3581 |
{ |
3923 |
|
|
|
3924 |
if (sc->vtnet_flags & VTNET_FLAG_MAC) { |
3925 |
virtio_read_device_config_array(sc->vtnet_dev, |
3926 |
offsetof(struct virtio_net_config, mac), |
3927 |
&sc->vtnet_hwaddr[0], sizeof(uint8_t), ETHER_ADDR_LEN); |
3928 |
} else { |
3929 |
/* Generate a random locally administered unicast address. */ |
3930 |
sc->vtnet_hwaddr[0] = 0xB2; |
3931 |
arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0); |
3932 |
} |
3933 |
} |
3934 |
|
3935 |
static void |
3936 |
vtnet_set_macaddr(struct vtnet_softc *sc) |
3937 |
{ |
3938 |
device_t dev; |
3582 |
device_t dev; |
3939 |
int error; |
3583 |
int i; |
3940 |
|
3584 |
|
3941 |
dev = sc->vtnet_dev; |
3585 |
dev = sc->vtnet_dev; |
3942 |
|
3586 |
|
3943 |
if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) { |
3587 |
if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) { |
3944 |
error = vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr); |
3588 |
if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0) |
3945 |
if (error) |
|
|
3946 |
device_printf(dev, "unable to set MAC address\n"); |
3589 |
device_printf(dev, "unable to set MAC address\n"); |
3947 |
return; |
3590 |
} else if (sc->vtnet_flags & VTNET_FLAG_MAC) { |
3948 |
} |
3591 |
for (i = 0; i < ETHER_ADDR_LEN; i++) { |
3949 |
|
|
|
3950 |
/* MAC in config is read-only in modern VirtIO. */ |
3951 |
if (!vtnet_modern(sc) && sc->vtnet_flags & VTNET_FLAG_MAC) { |
3952 |
for (int i = 0; i < ETHER_ADDR_LEN; i++) { |
3953 |
virtio_write_dev_config_1(dev, |
3592 |
virtio_write_dev_config_1(dev, |
3954 |
offsetof(struct virtio_net_config, mac) + i, |
3593 |
offsetof(struct virtio_net_config, mac) + i, |
3955 |
sc->vtnet_hwaddr[i]); |
3594 |
sc->vtnet_hwaddr[i]); |
Lines 3958-3969
Link Here
|
3958 |
} |
3597 |
} |
3959 |
|
3598 |
|
3960 |
static void |
3599 |
static void |
3961 |
vtnet_attached_set_macaddr(struct vtnet_softc *sc) |
3600 |
vtnet_get_hwaddr(struct vtnet_softc *sc) |
3962 |
{ |
3601 |
{ |
|
|
3602 |
device_t dev; |
3603 |
int i; |
3963 |
|
3604 |
|
3964 |
/* Assign MAC address if it was generated. */ |
3605 |
dev = sc->vtnet_dev; |
3965 |
if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) |
3606 |
|
3966 |
vtnet_set_macaddr(sc); |
3607 |
if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) { |
|
|
3608 |
/* |
3609 |
* Generate a random locally administered unicast address. |
3610 |
* |
3611 |
* It would be nice to generate the same MAC address across |
3612 |
* reboots, but it seems all the hosts currently available |
3613 |
* support the MAC feature, so this isn't too important. |
3614 |
*/ |
3615 |
sc->vtnet_hwaddr[0] = 0xB2; |
3616 |
arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0); |
3617 |
vtnet_set_hwaddr(sc); |
3618 |
return; |
3619 |
} |
3620 |
|
3621 |
for (i = 0; i < ETHER_ADDR_LEN; i++) { |
3622 |
sc->vtnet_hwaddr[i] = virtio_read_dev_config_1(dev, |
3623 |
offsetof(struct virtio_net_config, mac) + i); |
3624 |
} |
3967 |
} |
3625 |
} |
3968 |
|
3626 |
|
3969 |
static void |
3627 |
static void |
Lines 3994-3999
Link Here
|
3994 |
} |
3652 |
} |
3995 |
|
3653 |
|
3996 |
static void |
3654 |
static void |
|
|
3655 |
vtnet_set_tx_intr_threshold(struct vtnet_softc *sc) |
3656 |
{ |
3657 |
int size, thresh; |
3658 |
|
3659 |
size = virtqueue_size(sc->vtnet_txqs[0].vtntx_vq); |
3660 |
|
3661 |
/* |
3662 |
* The Tx interrupt is disabled until the queue free count falls |
3663 |
* below our threshold. Completed frames are drained from the Tx |
3664 |
* virtqueue before transmitting new frames and in the watchdog |
3665 |
* callout, so the frequency of Tx interrupts is greatly reduced, |
3666 |
* at the cost of not freeing mbufs as quickly as they otherwise |
3667 |
* would be. |
3668 |
* |
3669 |
* N.B. We assume all the Tx queues are the same size. |
3670 |
*/ |
3671 |
thresh = size / 4; |
3672 |
|
3673 |
/* |
3674 |
* Without indirect descriptors, leave enough room for the most |
3675 |
* segments we handle. |
3676 |
*/ |
3677 |
if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 && |
3678 |
thresh < sc->vtnet_tx_nsegs) |
3679 |
thresh = sc->vtnet_tx_nsegs; |
3680 |
|
3681 |
sc->vtnet_tx_intr_thresh = thresh; |
3682 |
} |
3683 |
|
3684 |
static void |
3997 |
vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx, |
3685 |
vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx, |
3998 |
struct sysctl_oid_list *child, struct vtnet_rxq *rxq) |
3686 |
struct sysctl_oid_list *child, struct vtnet_rxq *rxq) |
3999 |
{ |
3687 |
{ |
Lines 4021-4028
Link Here
|
4021 |
&stats->vrxs_csum, "Receive checksum offloaded"); |
3709 |
&stats->vrxs_csum, "Receive checksum offloaded"); |
4022 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD, |
3710 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD, |
4023 |
&stats->vrxs_csum_failed, "Receive checksum offload failed"); |
3711 |
&stats->vrxs_csum_failed, "Receive checksum offload failed"); |
4024 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro", CTLFLAG_RD, |
|
|
4025 |
&stats->vrxs_host_lro, "Receive host segmentation offloaded"); |
4026 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD, |
3712 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD, |
4027 |
&stats->vrxs_rescheduled, |
3713 |
&stats->vrxs_rescheduled, |
4028 |
"Receive interrupt handler rescheduled"); |
3714 |
"Receive interrupt handler rescheduled"); |
Lines 4053-4059
Link Here
|
4053 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD, |
3739 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD, |
4054 |
&stats->vtxs_csum, "Transmit checksum offloaded"); |
3740 |
&stats->vtxs_csum, "Transmit checksum offloaded"); |
4055 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD, |
3741 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD, |
4056 |
&stats->vtxs_tso, "Transmit TCP segmentation offloaded"); |
3742 |
&stats->vtxs_tso, "Transmit segmentation offloaded"); |
4057 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD, |
3743 |
SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD, |
4058 |
&stats->vtxs_rescheduled, |
3744 |
&stats->vtxs_rescheduled, |
4059 |
"Transmit interrupt handler rescheduled"); |
3745 |
"Transmit interrupt handler rescheduled"); |
Lines 4073-4079
Link Here
|
4073 |
tree = device_get_sysctl_tree(dev); |
3759 |
tree = device_get_sysctl_tree(dev); |
4074 |
child = SYSCTL_CHILDREN(tree); |
3760 |
child = SYSCTL_CHILDREN(tree); |
4075 |
|
3761 |
|
4076 |
for (i = 0; i < sc->vtnet_req_vq_pairs; i++) { |
3762 |
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { |
4077 |
vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]); |
3763 |
vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]); |
4078 |
vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]); |
3764 |
vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]); |
4079 |
} |
3765 |
} |
Lines 4133-4152
Link Here
|
4133 |
CTLFLAG_RD, &stats->rx_task_rescheduled, |
3819 |
CTLFLAG_RD, &stats->rx_task_rescheduled, |
4134 |
"Times the receive interrupt task rescheduled itself"); |
3820 |
"Times the receive interrupt task rescheduled itself"); |
4135 |
|
3821 |
|
4136 |
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_unknown_ethtype", |
3822 |
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype", |
4137 |
CTLFLAG_RD, &stats->tx_csum_unknown_ethtype, |
3823 |
CTLFLAG_RD, &stats->tx_csum_bad_ethtype, |
4138 |
"Aborted transmit of checksum offloaded buffer with unknown " |
3824 |
"Aborted transmit of checksum offloaded buffer with unknown " |
4139 |
"Ethernet type"); |
3825 |
"Ethernet type"); |
4140 |
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_proto_mismatch", |
3826 |
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype", |
4141 |
CTLFLAG_RD, &stats->tx_csum_proto_mismatch, |
3827 |
CTLFLAG_RD, &stats->tx_tso_bad_ethtype, |
4142 |
"Aborted transmit of checksum offloaded buffer because mismatched " |
3828 |
"Aborted transmit of TSO buffer with unknown Ethernet type"); |
4143 |
"protocols"); |
|
|
4144 |
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp", |
3829 |
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp", |
4145 |
CTLFLAG_RD, &stats->tx_tso_not_tcp, |
3830 |
CTLFLAG_RD, &stats->tx_tso_not_tcp, |
4146 |
"Aborted transmit of TSO buffer with non TCP protocol"); |
3831 |
"Aborted transmit of TSO buffer with non TCP protocol"); |
4147 |
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_without_csum", |
|
|
4148 |
CTLFLAG_RD, &stats->tx_tso_without_csum, |
4149 |
"Aborted transmit of TSO buffer without TCP checksum offload"); |
4150 |
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged", |
3832 |
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged", |
4151 |
CTLFLAG_RD, &stats->tx_defragged, |
3833 |
CTLFLAG_RD, &stats->tx_defragged, |
4152 |
"Transmit mbufs defragged"); |
3834 |
"Transmit mbufs defragged"); |
Lines 4179-4188
Link Here
|
4179 |
|
3861 |
|
4180 |
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs", |
3862 |
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs", |
4181 |
CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0, |
3863 |
CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0, |
4182 |
"Number of maximum supported virtqueue pairs"); |
3864 |
"Maximum number of supported virtqueue pairs"); |
4183 |
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "req_vq_pairs", |
3865 |
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "requested_vq_pairs", |
4184 |
CTLFLAG_RD, &sc->vtnet_req_vq_pairs, 0, |
3866 |
CTLFLAG_RD, &sc->vtnet_requested_vq_pairs, 0, |
4185 |
"Number of requested virtqueue pairs"); |
3867 |
"Requested number of virtqueue pairs"); |
4186 |
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs", |
3868 |
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs", |
4187 |
CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0, |
3869 |
CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0, |
4188 |
"Number of active virtqueue pairs"); |
3870 |
"Number of active virtqueue pairs"); |
Lines 4190-4208
Link Here
|
4190 |
vtnet_setup_stat_sysctl(ctx, child, sc); |
3872 |
vtnet_setup_stat_sysctl(ctx, child, sc); |
4191 |
} |
3873 |
} |
4192 |
|
3874 |
|
4193 |
static void |
|
|
4194 |
vtnet_load_tunables(struct vtnet_softc *sc) |
4195 |
{ |
4196 |
|
4197 |
sc->vtnet_lro_entry_count = vtnet_tunable_int(sc, |
4198 |
"lro_entry_count", vtnet_lro_entry_count); |
4199 |
if (sc->vtnet_lro_entry_count < TCP_LRO_ENTRIES) |
4200 |
sc->vtnet_lro_entry_count = TCP_LRO_ENTRIES; |
4201 |
|
4202 |
sc->vtnet_lro_mbufq_depth = vtnet_tunable_int(sc, |
4203 |
"lro_mbufq_depeth", vtnet_lro_mbufq_depth); |
4204 |
} |
4205 |
|
4206 |
static int |
3875 |
static int |
4207 |
vtnet_rxq_enable_intr(struct vtnet_rxq *rxq) |
3876 |
vtnet_rxq_enable_intr(struct vtnet_rxq *rxq) |
4208 |
{ |
3877 |
{ |
Lines 4244-4257
Link Here
|
4244 |
static void |
3913 |
static void |
4245 |
vtnet_enable_rx_interrupts(struct vtnet_softc *sc) |
3914 |
vtnet_enable_rx_interrupts(struct vtnet_softc *sc) |
4246 |
{ |
3915 |
{ |
4247 |
struct vtnet_rxq *rxq; |
|
|
4248 |
int i; |
3916 |
int i; |
4249 |
|
3917 |
|
4250 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { |
3918 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) |
4251 |
rxq = &sc->vtnet_rxqs[i]; |
3919 |
vtnet_rxq_enable_intr(&sc->vtnet_rxqs[i]); |
4252 |
if (vtnet_rxq_enable_intr(rxq) != 0) |
|
|
4253 |
taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); |
4254 |
} |
4255 |
} |
3920 |
} |
4256 |
|
3921 |
|
4257 |
static void |
3922 |
static void |
Lines 4276-4282
Link Here
|
4276 |
{ |
3941 |
{ |
4277 |
int i; |
3942 |
int i; |
4278 |
|
3943 |
|
4279 |
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) |
3944 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) |
4280 |
vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]); |
3945 |
vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]); |
4281 |
} |
3946 |
} |
4282 |
|
3947 |
|
Lines 4285-4291
Link Here
|
4285 |
{ |
3950 |
{ |
4286 |
int i; |
3951 |
int i; |
4287 |
|
3952 |
|
4288 |
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) |
3953 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) |
4289 |
vtnet_txq_disable_intr(&sc->vtnet_txqs[i]); |
3954 |
vtnet_txq_disable_intr(&sc->vtnet_txqs[i]); |
4290 |
} |
3955 |
} |
4291 |
|
3956 |
|
Lines 4318-4326
Link Here
|
4318 |
sc = if_getsoftc(ifp); |
3983 |
sc = if_getsoftc(ifp); |
4319 |
|
3984 |
|
4320 |
VTNET_CORE_LOCK(sc); |
3985 |
VTNET_CORE_LOCK(sc); |
4321 |
*nrxr = sc->vtnet_req_vq_pairs; |
3986 |
*nrxr = sc->vtnet_max_vq_pairs; |
4322 |
*ncl = NETDUMP_MAX_IN_FLIGHT; |
3987 |
*ncl = NETDUMP_MAX_IN_FLIGHT; |
4323 |
*clsize = sc->vtnet_rx_clustersz; |
3988 |
*clsize = sc->vtnet_rx_clsize; |
4324 |
VTNET_CORE_UNLOCK(sc); |
3989 |
VTNET_CORE_UNLOCK(sc); |
4325 |
|
3990 |
|
4326 |
/* |
3991 |
/* |
Lines 4369-4375
Link Here
|
4369 |
return (EBUSY); |
4034 |
return (EBUSY); |
4370 |
|
4035 |
|
4371 |
(void)vtnet_txq_eof(&sc->vtnet_txqs[0]); |
4036 |
(void)vtnet_txq_eof(&sc->vtnet_txqs[0]); |
4372 |
for (i = 0; i < sc->vtnet_act_vq_pairs; i++) |
4037 |
for (i = 0; i < sc->vtnet_max_vq_pairs; i++) |
4373 |
(void)vtnet_rxq_eof(&sc->vtnet_rxqs[i]); |
4038 |
(void)vtnet_rxq_eof(&sc->vtnet_rxqs[i]); |
4374 |
return (0); |
4039 |
return (0); |
4375 |
} |
4040 |
} |