
(-)sys/dev/hyperv/netvsc/hv_net_vsc.c (-3 / +9 lines)
Lines 641-646
 	/* sema_wait(&NetVscChannel->channel_init_sema); */
 
 	/* Post the big receive buffer to NetVSP */
+	if (net_dev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
+		net_dev->rx_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
+	else
+		net_dev->rx_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
+	net_dev->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
+
 	ret = hv_nv_init_rx_buffer_with_net_vsp(device);
 	if (ret == 0)
 		ret = hv_nv_init_send_buffer_with_net_vsp(device);
Lines 675-684
 		goto cleanup;
 
 	/* Initialize the NetVSC channel extension */
-	net_dev->rx_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
 
-	net_dev->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
-
 	sema_init(&net_dev->channel_init_sema, 0, "netdev_sema");
 
 	/*
Lines 918-923
 	 */
 	hv_nv_on_receive_completion(device, vm_xfer_page_pkt->d.transaction_id,
 	    status);
+	hv_rf_receive_rollup(net_dev);
 }
 
 /*
Lines 1023-1026
 
 	if (bufferlen > NETVSC_PACKET_SIZE)
 		free(buffer, M_NETVSC);
+
+	hv_rf_channel_rollup(net_dev);
 }
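
[Reviewer note, not part of the attachment] The first two hunks move the receive/send buffer sizing ahead of hv_nv_init_rx_buffer_with_net_vsp(): the receive buffer size now depends on the NVSP protocol version that has just been negotiated, so it can no longer be a fixed assignment in the later channel-extension setup. The last two hunks add the new "rollup" calls at the tail of the receive-completion and channel-callback paths (see hv_rndis_filter.c below). A minimal sketch of the sizing rule, assuming the constants from hv_net_vsc.h; rx_buf_size_for() is a hypothetical helper used only for illustration:

	static uint32_t
	rx_buf_size_for(uint32_t nvsp_version)
	{
		/* NVSP 2 and older hosts only accept the smaller buffer. */
		if (nvsp_version <= NVSP_PROTOCOL_VERSION_2)
			return (NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);	/* 15MB */
		return (NETVSC_RECEIVE_BUFFER_SIZE);			/* 16MB */
	}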
(-)sys/dev/hyperv/netvsc/hv_net_vsc.h (-2 / +45 lines)
Lines 38-49
 #ifndef __HV_NET_VSC_H__
 #define __HV_NET_VSC_H__
 
-#include <sys/types.h>
 #include <sys/param.h>
 #include <sys/lock.h>
 #include <sys/malloc.h>
+#include <sys/queue.h>
 #include <sys/sx.h>
 
+#include <machine/bus.h>
+#include <sys/bus.h>
+#include <sys/bus_dma.h>
+
+#include <netinet/in.h>
+#include <netinet/tcp_lro.h>
+
+#include <net/if.h>
+#include <net/if_media.h>
+
 #include <dev/hyperv/include/hyperv.h>
 
 MALLOC_DECLARE(M_NETVSC);
Lines 851-857
 #define NETVSC_SEND_BUFFER_SIZE			(1024*1024*15)   /* 15M */
 #define NETVSC_SEND_BUFFER_ID			0xface
 
-
+#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY	(1024*1024*15) /* 15MB */
 #define NETVSC_RECEIVE_BUFFER_SIZE		(1024*1024*16) /* 16MB */
 
 #define NETVSC_RECEIVE_BUFFER_ID		0xcafe
Lines 978-983
 	hv_bool_uint8_t	link_state;
 } netvsc_device_info;
 
+struct hn_txdesc;
+SLIST_HEAD(hn_txdesc_list, hn_txdesc);
+
 /*
  * Device-specific softc structure
  */
Lines 984-989
 typedef struct hn_softc {
 	struct ifnet    *hn_ifp;
 	struct arpcom   arpcom;
+	struct ifmedia	hn_media;
 	device_t        hn_dev;
 	uint8_t         hn_unit;
 	int             hn_carrier;
Lines 994-999
 	int             temp_unusable;
 	struct hv_device  *hn_dev_obj;
 	netvsc_dev  	*net_dev;
+
+	int		hn_txdesc_cnt;
+	struct hn_txdesc *hn_txdesc;
+	bus_dma_tag_t	hn_tx_data_dtag;
+	bus_dma_tag_t	hn_tx_rndis_dtag;
+	int		hn_tx_chimney_size;
+	int		hn_tx_chimney_max;
+
+	struct mtx	hn_txlist_spin;
+	struct hn_txdesc_list hn_txlist;
+	int		hn_txdesc_avail;
+	int		hn_txeof;
+
+	struct lro_ctrl	hn_lro;
+	int		hn_lro_hiwat;
+
+	/* Trust tcp segments verification on host side */
+	int		hn_trust_hosttcp;
+
+	u_long		hn_csum_ip;
+	u_long		hn_csum_tcp;
+	u_long		hn_csum_trusted;
+	u_long		hn_lro_tried;
+	u_long		hn_small_pkts;
+	u_long		hn_no_txdescs;
+	u_long		hn_send_failed;
+	u_long		hn_txdma_failed;
+	u_long		hn_tx_collapsed;
+	u_long		hn_tx_chimney;
 } hn_softc_t;
 
 
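
[Reviewer note] The new hn_txdesc_list SLIST and the hn_txdesc* fields in hn_softc back a preallocated transmit-descriptor freelist: hn_create_tx_ring() fills it, and hn_txdesc_get()/hn_txdesc_put() in the next attachment pop and push entries under hn_txlist_spin. A simplified, lock-free sketch of that shape, with hypothetical names (txdesc_get_sketch/txdesc_put_sketch) and only the list plumbing shown:

	#include <sys/queue.h>

	struct hn_txdesc {
		SLIST_ENTRY(hn_txdesc) link;
		/* mbuf pointer, DMA maps, refcount, flags elided */
	};
	SLIST_HEAD(hn_txdesc_list, hn_txdesc);

	static struct hn_txdesc *
	txdesc_get_sketch(struct hn_txdesc_list *list)
	{
		struct hn_txdesc *txd;

		txd = SLIST_FIRST(list);
		if (txd != NULL)
			SLIST_REMOVE_HEAD(list, link);
		return (txd);
	}

	static void
	txdesc_put_sketch(struct hn_txdesc_list *list, struct hn_txdesc *txd)
	{
		SLIST_INSERT_HEAD(list, txd, link);
	}

The real versions also maintain the hn_txdesc_avail/hn_no_txdescs counters that the new sysctls expose.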
(-)sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c (-213 / +842 lines)
Lines 69-74
 #include <sys/queue.h>
 #include <sys/lock.h>
 #include <sys/sx.h>
+#include <sys/sysctl.h>
 
 #include <net/if.h>
 #include <net/if_arp.h>
Lines 128-141
 #define HV_NV_SC_PTR_OFFSET_IN_BUF         0
 #define HV_NV_PACKET_OFFSET_IN_BUF         16
 
+/* YYY should get it from the underlying channel */
+#define HN_TX_DESC_CNT			512
 
+#define HN_RNDIS_MSG_LEN		\
+    (sizeof(rndis_msg) +		\
+     RNDIS_VLAN_PPI_SIZE +		\
+     RNDIS_TSO_PPI_SIZE +		\
+     RNDIS_CSUM_PPI_SIZE)
+#define HN_RNDIS_MSG_BOUNDARY		PAGE_SIZE
+#define HN_RNDIS_MSG_ALIGN		CACHE_LINE_SIZE
+
+#define HN_TX_DATA_BOUNDARY		PAGE_SIZE
+#define HN_TX_DATA_MAXSIZE		IP_MAXPACKET
+#define HN_TX_DATA_SEGSIZE		PAGE_SIZE
+#define HN_TX_DATA_SEGCNT_MAX		\
+    (NETVSC_PACKET_MAXPAGE - HV_RF_NUM_TX_RESERVED_PAGE_BUFS)
+
+struct hn_txdesc {
+	SLIST_ENTRY(hn_txdesc) link;
+	struct mbuf	*m;
+	struct hn_softc	*sc;
+	int		refs;
+	uint32_t	flags;		/* HN_TXD_FLAG_ */
+	netvsc_packet	netvsc_pkt;	/* XXX to be removed */
+
+	bus_dmamap_t	data_dmap;
+
+	bus_addr_t	rndis_msg_paddr;
+	rndis_msg	*rndis_msg;
+	bus_dmamap_t	rndis_msg_dmap;
+};
+
+#define HN_TXD_FLAG_ONLIST	0x1
+#define HN_TXD_FLAG_DMAMAP	0x2
+
 /*
- * Data types
+ * A unified flag for all outbound check sum flags is useful,
+ * and it helps avoiding unnecessary check sum calculation in
+ * network forwarding scenario.
  */
+#define HV_CSUM_FOR_OUTBOUND \
+    (CSUM_IP|CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP|CSUM_IP_TSO|CSUM_IP_ISCSI| \
+     CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP|CSUM_IP6_TSO|CSUM_IP6_ISCSI)
 
-struct hv_netvsc_driver_context {
-	uint32_t		drv_inited;
-};
+/* XXX move to netinet/tcp_lro.h */
+#define HN_LRO_HIWAT_MAX				65535
+#define HN_LRO_HIWAT_DEF				HN_LRO_HIWAT_MAX
+/* YYY 2*MTU is a bit rough, but should be good enough. */
+#define HN_LRO_HIWAT_MTULIM(ifp)			(2 * (ifp)->if_mtu)
+#define HN_LRO_HIWAT_ISVALID(sc, hiwat)			\
+    ((hiwat) >= HN_LRO_HIWAT_MTULIM((sc)->hn_ifp) ||	\
+     (hiwat) <= HN_LRO_HIWAT_MAX)
 
 /*
  * Be aware that this sleepable mutex will exhibit WITNESS errors when
Lines 159-168
 
 int hv_promisc_mode = 0;    /* normal mode by default */
 
-/* The one and only one */
-static struct hv_netvsc_driver_context g_netvsc_drv;
+/* Trust tcp segements verification on host side. */
+static int hn_trust_hosttcp = 0;
+TUNABLE_INT("dev.hn.trust_hosttcp", &hn_trust_hosttcp);
 
+#if __FreeBSD_version >= 1100045
+/* Limit TSO burst size */
+static int hn_tso_maxlen = 0;
+TUNABLE_INT("dev.hn.tso_maxlen", &hn_tso_maxlen);
+#endif
 
+/* Limit chimney send size */
+static int hn_tx_chimney_size = 0;
+TUNABLE_INT("dev.hn.tx_chimney_size", &hn_tx_chimney_size);
+
 /*
  * Forward declarations
  */
Lines 170-178
 static void hn_ifinit_locked(hn_softc_t *sc);
 static void hn_ifinit(void *xsc);
 static int  hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
-static int  hn_start_locked(struct ifnet *ifp);
+static void hn_start_locked(struct ifnet *ifp);
 static void hn_start(struct ifnet *ifp);
+static int hn_ifmedia_upd(struct ifnet *ifp);
+static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
+#ifdef HN_LRO_HIWAT
+static int hn_lro_hiwat_sysctl(SYSCTL_HANDLER_ARGS);
+#endif
+static int hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS);
+static int hn_check_iplen(const struct mbuf *, int);
+static int hn_create_tx_ring(struct hn_softc *sc);
+static void hn_destroy_tx_ring(struct hn_softc *sc);
 
+static __inline void
+hn_set_lro_hiwat(struct hn_softc *sc, int hiwat)
+{
+	sc->hn_lro_hiwat = hiwat;
+#ifdef HN_LRO_HIWAT
+	sc->hn_lro.lro_hiwat = sc->hn_lro_hiwat;
+#endif
+}
+
 /*
  * NetVsc get message transport protocol type 
  */
Lines 229-263
 	return (ret_val);
 }
 
-/*
- * NetVsc driver initialization
- * Note:  Filter init is no longer required
- */
 static int
-netvsc_drv_init(void)
+hn_ifmedia_upd(struct ifnet *ifp __unused)
 {
-	return (0);
+
+	return EOPNOTSUPP;
 }
 
-/*
- * NetVsc global initialization entry point
- */
 static void
-netvsc_init(void)
+hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
 {
-	if (bootverbose)
-		printf("Netvsc initializing... ");
+	struct hn_softc *sc = ifp->if_softc;
 
-	/*
-	 * XXXKYS: cleanup initialization
-	 */
-	if (!cold && !g_netvsc_drv.drv_inited) {
-		g_netvsc_drv.drv_inited = 1;
-		netvsc_drv_init();
-		if (bootverbose)
-			printf("done!\n");
-	} else if (bootverbose)
-		printf("Already initialized!\n");
+	ifmr->ifm_status = IFM_AVALID;
+	ifmr->ifm_active = IFM_ETHER;
+
+	if (!sc->hn_carrier) {
+		ifmr->ifm_active |= IFM_NONE;
+		return;
+	}
+	ifmr->ifm_status |= IFM_ACTIVE;
+	ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
 }
 
 /* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
Lines 300-310
 	netvsc_device_info device_info;
 	hn_softc_t *sc;
 	int unit = device_get_unit(dev);
-	struct ifnet *ifp;
-	int ret;
+	struct ifnet *ifp = NULL;
+	struct sysctl_oid_list *child;
+	struct sysctl_ctx_list *ctx;
+	int error;
+#if __FreeBSD_version >= 1100045
+	int tso_maxlen;
+#endif
 
-	netvsc_init();
-
 	sc = device_get_softc(dev);
 	if (sc == NULL) {
 		return (ENOMEM);
Lines 313-319
 	bzero(sc, sizeof(hn_softc_t));
 	sc->hn_unit = unit;
 	sc->hn_dev = dev;
+	sc->hn_lro_hiwat = HN_LRO_HIWAT_DEF;
+	sc->hn_trust_hosttcp = hn_trust_hosttcp;
 
+	error = hn_create_tx_ring(sc);
+	if (error)
+		goto failed;
+
 	NV_LOCK_INIT(sc, "NetVSCLock");
 
 	sc->hn_dev_obj = device_ctx;
Lines 335-348
 	ifp->if_snd.ifq_drv_maxlen = 511;
 	IFQ_SET_READY(&ifp->if_snd);
 
+	ifmedia_init(&sc->hn_media, 0, hn_ifmedia_upd, hn_ifmedia_sts);
+	ifmedia_add(&sc->hn_media, IFM_ETHER | IFM_AUTO, 0, NULL);
+	ifmedia_set(&sc->hn_media, IFM_ETHER | IFM_AUTO);
+	/* XXX ifmedia_set really should do this for us */
+	sc->hn_media.ifm_media = sc->hn_media.ifm_cur->ifm_media;
+
 	/*
 	 * Tell upper layers that we support full VLAN capability.
 	 */
 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
 	ifp->if_capabilities |=
-	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO;
+	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
+	    IFCAP_LRO;
 	ifp->if_capenable |=
-	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO;
+	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
+	    IFCAP_LRO;
 	/*
 	 * Only enable UDP checksum offloading when it is on 2012R2 or
 	 * later. UDP checksum offloading doesn't work on earlier
Lines 353-371
 	else
 		ifp->if_hwassist = CSUM_TCP | CSUM_TSO;
 
-	ret = hv_rf_on_device_add(device_ctx, &device_info);
-	if (ret != 0) {
-		if_free(ifp);
+	error = hv_rf_on_device_add(device_ctx, &device_info);
+	if (error)
+		goto failed;
 
-		return (ret);
-	}
 	if (device_info.link_state == 0) {
 		sc->hn_carrier = 1;
 	}
 
+#if defined(INET) || defined(INET6)
+	tcp_lro_init(&sc->hn_lro);
+	/* Driver private LRO settings */
+	sc->hn_lro.ifp = ifp;
+#ifdef HN_LRO_HIWAT
+	sc->hn_lro.lro_hiwat = sc->hn_lro_hiwat;
+#endif
+#endif	/* INET || INET6 */
+
+#if __FreeBSD_version >= 1100045
+	tso_maxlen = hn_tso_maxlen;
+	if (tso_maxlen <= 0 || tso_maxlen > IP_MAXPACKET)
+		tso_maxlen = IP_MAXPACKET;
+
+	ifp->if_hw_tsomaxsegcount = HN_TX_DATA_SEGCNT_MAX;
+	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
+	ifp->if_hw_tsomax = tso_maxlen -
+	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
+#endif
+
 	ether_ifattach(ifp, device_info.mac_addr);
 
+#if __FreeBSD_version >= 1100045
+	if_printf(ifp, "TSO: %u/%u/%u\n", ifp->if_hw_tsomax,
+	    ifp->if_hw_tsomaxsegcount, ifp->if_hw_tsomaxsegsize);
+#endif
+
+	sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
+	sc->hn_tx_chimney_size = sc->hn_tx_chimney_max;
+	if (hn_tx_chimney_size > 0 &&
+	    hn_tx_chimney_size < sc->hn_tx_chimney_max)
+		sc->hn_tx_chimney_size = hn_tx_chimney_size;
+
+	ctx = device_get_sysctl_ctx(dev);
+	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
+
+	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "lro_queued",
+	    CTLFLAG_RW, &sc->hn_lro.lro_queued, 0, "LRO queued");
+	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "lro_flushed",
+	    CTLFLAG_RW, &sc->hn_lro.lro_flushed, 0, "LRO flushed");
+	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "lro_tried",
+	    CTLFLAG_RW, &sc->hn_lro_tried, "# of LRO tries");
+#ifdef HN_LRO_HIWAT
+	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_hiwat",
+	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_lro_hiwat_sysctl,
+	    "I", "LRO high watermark");
+#endif
+	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "trust_hosttcp",
+	    CTLFLAG_RW, &sc->hn_trust_hosttcp, 0,
+	    "Trust tcp segement verification on host side, "
+	    "when csum info is missing");
+	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "csum_ip",
+	    CTLFLAG_RW, &sc->hn_csum_ip, "RXCSUM IP");
+	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "csum_tcp",
+	    CTLFLAG_RW, &sc->hn_csum_tcp, "RXCSUM TCP");
+	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "csum_trusted",
+	    CTLFLAG_RW, &sc->hn_csum_trusted,
+	    "# of TCP segements that we trust host's csum verification");
+	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "small_pkts",
+	    CTLFLAG_RW, &sc->hn_small_pkts, "# of small packets received");
+	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_txdescs",
+	    CTLFLAG_RW, &sc->hn_no_txdescs, "# of times short of TX descs");
+	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "send_failed",
+	    CTLFLAG_RW, &sc->hn_send_failed, "# of hyper-v sending failure");
+	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "txdma_failed",
+	    CTLFLAG_RW, &sc->hn_txdma_failed, "# of TX DMA failure");
+	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_collapsed",
+	    CTLFLAG_RW, &sc->hn_tx_collapsed, "# of TX mbuf collapsed");
+	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_chimney",
+	    CTLFLAG_RW, &sc->hn_tx_chimney, "# of chimney send");
+	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_cnt",
+	    CTLFLAG_RD, &sc->hn_txdesc_cnt, 0, "# of total TX descs");
+	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_avail",
+	    CTLFLAG_RD, &sc->hn_txdesc_avail, 0, "# of available TX descs");
+	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_chimney_max",
+	    CTLFLAG_RD, &sc->hn_tx_chimney_max, 0,
+	    "Chimney send packet size upper boundary");
+	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_size",
+	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_tx_chimney_size_sysctl,
+	    "I", "Chimney send packet size limit");
+
+	if (unit == 0) {
+		struct sysctl_ctx_list *dc_ctx;
+		struct sysctl_oid_list *dc_child;
+		devclass_t dc;
+
+		/*
+		 * Add sysctl nodes for devclass
+		 */
+		dc = device_get_devclass(dev);
+		dc_ctx = devclass_get_sysctl_ctx(dc);
+		dc_child = SYSCTL_CHILDREN(devclass_get_sysctl_tree(dc));
+
+		SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "trust_hosttcp",
+		    CTLFLAG_RD, &hn_trust_hosttcp, 0,
+		    "Trust tcp segement verification on host side, "
+		    "when csum info is missing (global setting)");
+		SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "tx_chimney_size",
+		    CTLFLAG_RD, &hn_tx_chimney_size, 0,
+		    "Chimney send packet size limit");
+#if __FreeBSD_version >= 1100045
+		SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "tso_maxlen",
+		    CTLFLAG_RD, &hn_tso_maxlen, 0, "TSO burst limit");
+#endif
+	}
+
 	return (0);
+failed:
+	hn_destroy_tx_ring(sc);
+	if (ifp != NULL)
+		if_free(ifp);
+	return (error);
 }
 
 /*
Lines 374-379
 static int
 netvsc_detach(device_t dev)
 {
+	struct hn_softc *sc = device_get_softc(dev);
 	struct hv_device *hv_device = vmbus_get_devctx(dev); 
 
 	if (bootverbose)
Lines 392-397
 
 	hv_rf_on_device_remove(hv_device, HV_RF_NV_DESTROY_CHANNEL);
 
+	ifmedia_removeall(&sc->hn_media);
+#if defined(INET) || defined(INET6)
+	tcp_lro_free(&sc->hn_lro);
+#endif
+	hn_destroy_tx_ring(sc);
+
 	return (0);
 }
 
Lines 404-409
 	return (0);
 }
 
+static __inline int
+hn_txdesc_dmamap_load(struct hn_softc *sc, struct hn_txdesc *txd,
+    struct mbuf **m_head, bus_dma_segment_t *segs, int *nsegs)
+{
+	struct mbuf *m = *m_head;
+	int error;
+
+	error = bus_dmamap_load_mbuf_sg(sc->hn_tx_data_dtag, txd->data_dmap,
+	    m, segs, nsegs, BUS_DMA_NOWAIT);
+	if (error == EFBIG) {
+		struct mbuf *m_new;
+
+		m_new = m_collapse(m, M_NOWAIT, HN_TX_DATA_SEGCNT_MAX);
+		if (m_new == NULL)
+			return ENOBUFS;
+		else
+			*m_head = m = m_new;
+		sc->hn_tx_collapsed++;
+
+		error = bus_dmamap_load_mbuf_sg(sc->hn_tx_data_dtag,
+		    txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT);
+	}
+	if (!error) {
+		bus_dmamap_sync(sc->hn_tx_data_dtag, txd->data_dmap,
+		    BUS_DMASYNC_PREWRITE);
+		txd->flags |= HN_TXD_FLAG_DMAMAP;
+	}
+	return error;
+}
+
+static __inline void
+hn_txdesc_dmamap_unload(struct hn_softc *sc, struct hn_txdesc *txd)
+{
+
+	if (txd->flags & HN_TXD_FLAG_DMAMAP) {
+		bus_dmamap_sync(sc->hn_tx_data_dtag,
+		    txd->data_dmap, BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(sc->hn_tx_data_dtag,
+		    txd->data_dmap);
+		txd->flags &= ~HN_TXD_FLAG_DMAMAP;
+	}
+}
+
+static __inline int
+hn_txdesc_put(struct hn_softc *sc, struct hn_txdesc *txd)
+{
+
+	KASSERT((txd->flags & HN_TXD_FLAG_ONLIST) == 0,
+	    ("put an onlist txd %#x", txd->flags));
+
+	KASSERT(txd->refs > 0, ("invalid txd refs %d", txd->refs));
+	if (atomic_fetchadd_int(&txd->refs, -1) != 1)
+		return 0;
+
+	hn_txdesc_dmamap_unload(sc, txd);
+	if (txd->m != NULL) {
+		m_freem(txd->m);
+		txd->m = NULL;
+	}
+
+	txd->flags |= HN_TXD_FLAG_ONLIST;
+
+	mtx_lock_spin(&sc->hn_txlist_spin);
+	KASSERT(sc->hn_txdesc_avail >= 0 &&
+	    sc->hn_txdesc_avail < sc->hn_txdesc_cnt,
+	    ("txdesc_put: invalid txd avail %d", sc->hn_txdesc_avail));
+	sc->hn_txdesc_avail++;
+	SLIST_INSERT_HEAD(&sc->hn_txlist, txd, link);
+	mtx_unlock_spin(&sc->hn_txlist_spin);
+
+	return 1;
+}
+
+static __inline struct hn_txdesc *
+hn_txdesc_get(struct hn_softc *sc)
+{
+	struct hn_txdesc *txd;
+
+	mtx_lock_spin(&sc->hn_txlist_spin);
+	txd = SLIST_FIRST(&sc->hn_txlist);
+	if (txd != NULL) {
+		KASSERT(sc->hn_txdesc_avail > 0,
+		    ("txdesc_get: invalid txd avail %d", sc->hn_txdesc_avail));
+		sc->hn_txdesc_avail--;
+		SLIST_REMOVE_HEAD(&sc->hn_txlist, link);
+	}
+	mtx_unlock_spin(&sc->hn_txlist_spin);
+
+	if (txd != NULL) {
+		KASSERT(txd->m == NULL && txd->refs == 0 &&
+		    (txd->flags & HN_TXD_FLAG_ONLIST), ("invalid txd"));
+		txd->flags &= ~HN_TXD_FLAG_ONLIST;
+		txd->refs = 1;
+	}
+	return txd;
+}
+
+static __inline void
+hn_txdesc_hold(struct hn_txdesc *txd)
+{
+
+	/* 0->1 transition will never work */
+	KASSERT(txd->refs > 0, ("invalid refs %d", txd->refs));
+	atomic_add_int(&txd->refs, 1);
+}
+
 /*
  * Send completion processing
  *
Lines 414-447
 void
 netvsc_xmit_completion(void *context)
 {
-	netvsc_packet *packet = (netvsc_packet *)context;
-	struct mbuf *mb;
-	uint8_t *buf;
+	netvsc_packet *packet = context;
+	struct hn_txdesc *txd;
+	struct hn_softc *sc;
 
-	mb = (struct mbuf *)(uintptr_t)packet->compl.send.send_completion_tid;
-	buf = ((uint8_t *)packet) - HV_NV_PACKET_OFFSET_IN_BUF;
+	txd = (struct hn_txdesc *)(uintptr_t)
+	    packet->compl.send.send_completion_tid;
 
-	free(buf, M_NETVSC);
+	sc = txd->sc;
+	sc->hn_txeof = 1;
+	hn_txdesc_put(sc, txd);
+}
 
-	if (mb != NULL) {
-		m_freem(mb);
-	}
+void
+netvsc_channel_rollup(struct hv_device *device_ctx)
+{
+	struct hn_softc *sc = device_get_softc(device_ctx->device);
+	struct ifnet *ifp;
+
+	if (!sc->hn_txeof)
+		return;
+
+	sc->hn_txeof = 0;
+	ifp = sc->hn_ifp;
+	NV_LOCK(sc);
+	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+	hn_start_locked(ifp);
+	NV_UNLOCK(sc);
 }
 
 /*
  * Start a transmit of one or more packets
  */
-static int
+static void
 hn_start_locked(struct ifnet *ifp)
 {
 	hn_softc_t *sc = ifp->if_softc;
 	struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);
 	netvsc_dev *net_dev = sc->net_dev;
-	device_t dev = device_ctx->device;
-	uint8_t *buf;
 	netvsc_packet *packet;
 	struct mbuf *m_head, *m;
-	struct mbuf *mc_head = NULL;
 	struct ether_vlan_header *eh;
 	rndis_msg *rndis_mesg;
 	rndis_packet *rndis_pkt;
Lines 450-533
 	rndis_tcp_ip_csum_info *csum_info;
 	rndis_tcp_tso_info *tso_info;	
 	int ether_len;
-	int i;
-	int num_frags;
-	int len;
-	int retries = 0;
-	int ret = 0;	
 	uint32_t rndis_msg_size = 0;
 	uint32_t trans_proto_type;
 	uint32_t send_buf_section_idx =
 	    NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
 
-	while (!IFQ_DRV_IS_EMPTY(&sc->hn_ifp->if_snd)) {
-		IFQ_DRV_DEQUEUE(&sc->hn_ifp->if_snd, m_head);
-		if (m_head == NULL) {
-			break;
-		}
+	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+	    IFF_DRV_RUNNING)
+		return;
 
-		len = 0;
-		num_frags = 0;
+	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+		bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX];
+		int error, nsegs, i, send_failed = 0;
+		struct hn_txdesc *txd;
 
-		/* Walk the mbuf list computing total length and num frags */
-		for (m = m_head; m != NULL; m = m->m_next) {
-			if (m->m_len != 0) {
-				num_frags++;
-				len += m->m_len;
-			}
-		}
+		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
+		if (m_head == NULL)
+			break;
 
-		/*
-		 * Reserve the number of pages requested.  Currently,
-		 * one page is reserved for the message in the RNDIS
-		 * filter packet
-		 */
-		num_frags += HV_RF_NUM_TX_RESERVED_PAGE_BUFS;
-
-		/* If exceeds # page_buffers in netvsc_packet */
-		if (num_frags > NETVSC_PACKET_MAXPAGE) {
-			device_printf(dev, "exceed max page buffers,%d,%d\n",
-			    num_frags, NETVSC_PACKET_MAXPAGE);
-			m_freem(m_head);
-			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
-			return (EINVAL);
+		txd = hn_txdesc_get(sc);
+		if (txd == NULL) {
+			sc->hn_no_txdescs++;
+			IF_PREPEND(&ifp->if_snd, m_head);
+			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+			break;
 		}
 
-		/*
-		 * Allocate a buffer with space for a netvsc packet plus a
-		 * number of reserved areas.  First comes a (currently 16
-		 * bytes, currently unused) reserved data area.  Second is
-		 * the netvsc_packet. Third is an area reserved for an 
-		 * rndis_filter_packet struct. Fourth (optional) is a 
-		 * rndis_per_packet_info struct.
-		 * Changed malloc to M_NOWAIT to avoid sleep under spin lock.
-		 * No longer reserving extra space for page buffers, as they
-		 * are already part of the netvsc_packet.
-		 */
-		buf = malloc(HV_NV_PACKET_OFFSET_IN_BUF +
-			sizeof(netvsc_packet) + 
-			sizeof(rndis_msg) +
-			RNDIS_VLAN_PPI_SIZE +
-			RNDIS_TSO_PPI_SIZE +
-			RNDIS_CSUM_PPI_SIZE,
-			M_NETVSC, M_ZERO | M_NOWAIT);
-		if (buf == NULL) {
-			device_printf(dev, "hn:malloc packet failed\n");
-			m_freem(m_head);
-			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
-			return (ENOMEM);
-		}
+		packet = &txd->netvsc_pkt;
+		/* XXX not necessary */
+		memset(packet, 0, sizeof(*packet));
 
-		packet = (netvsc_packet *)(buf + HV_NV_PACKET_OFFSET_IN_BUF);
-		*(vm_offset_t *)buf = HV_NV_SC_PTR_OFFSET_IN_BUF;
-
 		packet->is_data_pkt = TRUE;
 
-		/* Set up the rndis header */
-		packet->page_buf_count = num_frags;
-
 		/* Initialize it from the mbuf */
-		packet->tot_data_buf_len = len;
+		packet->tot_data_buf_len = m_head->m_pkthdr.len;
 
 		/*
 		 * extension points to the area reserved for the
Lines 535-542
 		 * the netvsc_packet (and rppi struct, if present;
 		 * length is updated later).
 		 */
-		packet->rndis_mesg = packet + 1;
-		rndis_mesg = (rndis_msg *)packet->rndis_mesg;
+		rndis_mesg = txd->rndis_msg;
+		/* XXX not necessary */
+		memset(rndis_mesg, 0, HN_RNDIS_MSG_LEN);
 		rndis_mesg->ndis_msg_type = REMOTE_NDIS_PACKET_MSG;
 
 		rndis_pkt = &rndis_mesg->msg.packet;
Lines 555-562
 			 * set up some additional fields so the Hyper-V infrastructure will stuff the VLAN tag
 			 * into the frame.
 			 */
-			packet->vlan_tci = m_head->m_pkthdr.ether_vtag;
-
 			rndis_msg_size += RNDIS_VLAN_PPI_SIZE;
 
 			rppi = hv_set_rppi_data(rndis_mesg, RNDIS_VLAN_PPI_SIZE,
Lines 567-576
 			    rppi->per_packet_info_offset);
 			/* FreeBSD does not support CFI or priority */
 			rppi_vlan_info->u1.s1.vlan_id =
-			    packet->vlan_tci & 0xfff;
+			    m_head->m_pkthdr.ether_vtag & 0xfff;
 		}
 
-		if (0 == m_head->m_pkthdr.csum_flags) {
+		/* Only check the flags for outbount and ignore the ones for inbount */
+		if (0 == (m_head->m_pkthdr.csum_flags & HV_CSUM_FOR_OUTBOUND)) {
 			goto pre_send;
 		}
 
Lines 668-674
 		packet->tot_data_buf_len = rndis_mesg->msg_len;
 
 		/* send packet with send buffer */
-		if (packet->tot_data_buf_len < net_dev->send_section_size) {
+		if (packet->tot_data_buf_len < sc->hn_tx_chimney_size) {
 			send_buf_section_idx =
 			    hv_nv_get_next_send_section(net_dev);
 			if (send_buf_section_idx !=
Lines 693-707
 				packet->send_buf_section_size =
 				    packet->tot_data_buf_len;
 				packet->page_buf_count = 0;
+				sc->hn_tx_chimney++;
 				goto do_send;
 			}
 		}
 
+		error = hn_txdesc_dmamap_load(sc, txd, &m_head, segs, &nsegs);
+		if (error) {
+			int freed;
+
+			/*
+			 * This mbuf is not linked w/ the txd yet, so free
+			 * it now.
+			 */
+			m_freem(m_head);
+			freed = hn_txdesc_put(sc, txd);
+			KASSERT(freed != 0,
+			    ("fail to free txd upon txdma error"));
+
+			sc->hn_txdma_failed++;
+			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+			continue;
+		}
+
+		packet->page_buf_count = nsegs +
+		    HV_RF_NUM_TX_RESERVED_PAGE_BUFS;
+
 		/* send packet with page buffer */
-		packet->page_buffers[0].pfn =
-		    atop(hv_get_phys_addr(rndis_mesg));
+		packet->page_buffers[0].pfn = atop(txd->rndis_msg_paddr);
 		packet->page_buffers[0].offset =
-		    (unsigned long)rndis_mesg & PAGE_MASK;
+		    txd->rndis_msg_paddr & PAGE_MASK;
 		packet->page_buffers[0].length = rndis_msg_size;
 
 		/*
Lines 708-725
 		 * Fill the page buffers with mbuf info starting at index
 		 * HV_RF_NUM_TX_RESERVED_PAGE_BUFS.
 		 */
-		i = HV_RF_NUM_TX_RESERVED_PAGE_BUFS;
-		for (m = m_head; m != NULL; m = m->m_next) {
-			if (m->m_len) {
-				vm_offset_t paddr =
-				    vtophys(mtod(m, vm_offset_t));
-				packet->page_buffers[i].pfn =
-				    paddr >> PAGE_SHIFT;
-				packet->page_buffers[i].offset =
-				    paddr & (PAGE_SIZE - 1);
-				packet->page_buffers[i].length = m->m_len;
-				i++;
-			}
+		for (i = 0; i < nsegs; ++i) {
+			hv_vmbus_page_buffer *pb = &packet->page_buffers[
+			    i + HV_RF_NUM_TX_RESERVED_PAGE_BUFS];
+
+			pb->pfn = atop(segs[i].ds_addr);
+			pb->offset = segs[i].ds_addr & PAGE_MASK;
+			pb->length = segs[i].ds_len;
 		}
 
 		packet->send_buf_section_idx = 
Lines 727-789
 		packet->send_buf_section_size = 0;
 
 do_send:
+		txd->m = m_head;
 
+		/* Set the completion routine */
+		packet->compl.send.on_send_completion = netvsc_xmit_completion;
+		packet->compl.send.send_completion_context = packet;
+		packet->compl.send.send_completion_tid =
+		    (uint64_t)(uintptr_t)txd;
+again:
 		/*
-		 * If bpf, copy the mbuf chain.  This is less expensive than
-		 * it appears; the mbuf clusters are not copied, only their
-		 * reference counts are incremented.
-		 * Needed to avoid a race condition where the completion
-		 * callback is invoked, freeing the mbuf chain, before the
-		 * bpf_mtap code has a chance to run.
+		 * Make sure that txd is not freed before ETHER_BPF_MTAP.
 		 */
-		if (ifp->if_bpf) {
-			mc_head = m_copypacket(m_head, M_DONTWAIT);
+		hn_txdesc_hold(txd);
+		error = hv_nv_on_send(device_ctx, packet);
+		if (!error) {
+			ETHER_BPF_MTAP(ifp, m_head);
+			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
 		}
-retry_send:
-		/* Set the completion routine */
-		packet->compl.send.on_send_completion = netvsc_xmit_completion;
-		packet->compl.send.send_completion_context = packet;
-		packet->compl.send.send_completion_tid = (uint64_t)(uintptr_t)m_head;
+		hn_txdesc_put(sc, txd);
 
-		/* Removed critical_enter(), does not appear necessary */
-		ret = hv_nv_on_send(device_ctx, packet);
-		if (ret == 0) {
-			ifp->if_opackets++;
-			/* if bpf && mc_head, call bpf_mtap code */
-			if (mc_head) {
-				ETHER_BPF_MTAP(ifp, mc_head);
-			}
-		} else {
-			retries++;
-			if (retries < 4) {
-				goto retry_send;
-			}
+		if (__predict_false(error)) {
+			int freed;
 
-			IF_PREPEND(&ifp->if_snd, m_head);
-			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
-
 			/*
-			 * Null the mbuf pointer so the completion function
-			 * does not free the mbuf chain.  We just pushed the
-			 * mbuf chain back on the if_snd queue.
+			 * This should "really rarely" happen.
+			 *
+			 * XXX Too many RX to be acked or too many sideband
+			 * commands to run?  Ask netvsc_channel_rollup()
+			 * to kick start later.
 			 */
-			packet->compl.send.send_completion_tid = 0;
+			sc->hn_txeof = 1;
+			if (!send_failed) {
+				sc->hn_send_failed++;
+				send_failed = 1;
+				/*
+				 * Try sending again after set hn_txeof;
+				 * in case that we missed the last
+				 * netvsc_channel_rollup().
+				 */
+				goto again;
+			}
+			if_printf(ifp, "send failed\n");
 
 			/*
-			 * Release the resources since we will not get any
-			 * send completion
+			 * This mbuf will be prepended, don't free it
+			 * in hn_txdesc_put(); only unload it from the
+			 * DMA map in hn_txdesc_put(), if it was loaded.
 			 */
-			netvsc_xmit_completion(packet);
-			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
-		}
+			txd->m = NULL;
+			freed = hn_txdesc_put(sc, txd);
+			KASSERT(freed != 0,
+			    ("fail to free txd upon send error"));
 
-		/* if bpf && mc_head, free the mbuf chain copy */
-		if (mc_head) {
-			m_freem(mc_head);
+			sc->hn_send_failed++;
+			IF_PREPEND(&ifp->if_snd, m_head);
+			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+			break;
 		}
 	}
-
-	return (ret);
 }
 
 /*
Lines 877-883
 	struct mbuf *m_new;
 	struct ifnet *ifp;
 	device_t dev = device_ctx->device;
-	int size;
+	int size, do_lro = 0;
 
 	if (sc == NULL) {
 		return (0); /* TODO: KYS how can this be! */
Lines 896-935
 	 */
 	if (packet->tot_data_buf_len > (ifp->if_mtu + ETHER_HDR_LEN)) {
 		return (0);
-	}
+	} else if (packet->tot_data_buf_len <= MHLEN) {
+		m_new = m_gethdr(M_NOWAIT, MT_DATA);
+		if (m_new == NULL)
+			return (0);
+		memcpy(mtod(m_new, void *), packet->data,
+		    packet->tot_data_buf_len);
+		m_new->m_pkthdr.len = m_new->m_len = packet->tot_data_buf_len;
+		sc->hn_small_pkts++;
+	} else {
+		/*
+		 * Get an mbuf with a cluster.  For packets 2K or less,
+		 * get a standard 2K cluster.  For anything larger, get a
+		 * 4K cluster.  Any buffers larger than 4K can cause problems
+		 * if looped around to the Hyper-V TX channel, so avoid them.
+		 */
+		size = MCLBYTES;
+		if (packet->tot_data_buf_len > MCLBYTES) {
+			/* 4096 */
+			size = MJUMPAGESIZE;
+		}
 
-	/*
-	 * Get an mbuf with a cluster.  For packets 2K or less,
-	 * get a standard 2K cluster.  For anything larger, get a
-	 * 4K cluster.  Any buffers larger than 4K can cause problems
-	 * if looped around to the Hyper-V TX channel, so avoid them.
-	 */
-	size = MCLBYTES;
+		m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
+		if (m_new == NULL) {
+			device_printf(dev, "alloc mbuf failed.\n");
+			return (0);
+		}
 
-	if (packet->tot_data_buf_len > MCLBYTES) {
-		/* 4096 */
-		size = MJUMPAGESIZE;
+		hv_m_append(m_new, packet->tot_data_buf_len, packet->data);
 	}
 
-	m_new = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, size);
-
-	if (m_new == NULL) {
-		device_printf(dev, "alloc mbuf failed.\n");
-		return (0);
-	}
-
-	hv_m_append(m_new, packet->tot_data_buf_len,
-			packet->data);
-
 	m_new->m_pkthdr.rcvif = ifp;
 
 	/* receive side checksum offload */
-	m_new->m_pkthdr.csum_flags = 0;
 	if (NULL != csum_info) {
 		/* IP csum offload */
 		if (csum_info->receive.ip_csum_succeeded) {
 			m_new->m_pkthdr.csum_flags |=
 			    (CSUM_IP_CHECKED | CSUM_IP_VALID);
+			sc->hn_csum_ip++;
 		}
 
 		/* TCP csum offload */
Lines 937-945
 			m_new->m_pkthdr.csum_flags |=
 			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
 			m_new->m_pkthdr.csum_data = 0xffff;
+			sc->hn_csum_tcp++;
 		}
+
+		if (csum_info->receive.ip_csum_succeeded &&
+		    csum_info->receive.tcp_csum_succeeded)
+			do_lro = 1;
+	} else {
+		const struct ether_header *eh;
+		uint16_t etype;
+		int hoff;
+
+		hoff = sizeof(*eh);
+		if (m_new->m_len < hoff)
+			goto skip;
+		eh = mtod(m_new, struct ether_header *);
+		etype = ntohs(eh->ether_type);
+		if (etype == ETHERTYPE_VLAN) {
+			const struct ether_vlan_header *evl;
+
+			hoff = sizeof(*evl);
+			if (m_new->m_len < hoff)
+				goto skip;
+			evl = mtod(m_new, struct ether_vlan_header *);
+			etype = ntohs(evl->evl_proto);
+		}
+
+		if (etype == ETHERTYPE_IP) {
+			int pr;
+
+			pr = hn_check_iplen(m_new, hoff);
+			if (pr == IPPROTO_TCP) {
+				if (sc->hn_trust_hosttcp) {
+					sc->hn_csum_trusted++;
+					m_new->m_pkthdr.csum_flags |=
+					   (CSUM_IP_CHECKED | CSUM_IP_VALID |
+					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+					m_new->m_pkthdr.csum_data = 0xffff;
+				}
+				/* Rely on SW csum verification though... */
+				do_lro = 1;
+			}
+		}
 	}
-
+skip:
 	if ((packet->vlan_tci != 0) &&
 	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
 		m_new->m_pkthdr.ether_vtag = packet->vlan_tci;
Lines 953-958
 
 	ifp->if_ipackets++;
 
+	if ((ifp->if_capenable & IFCAP_LRO) && do_lro) {
+#if defined(INET) || defined(INET6)
+		struct lro_ctrl *lro = &sc->hn_lro;
+
+		if (lro->lro_cnt) {
+			sc->hn_lro_tried++;
+			if (tcp_lro_rx(lro, m_new, 0) == 0) {
+				/* DONE! */
+				return 0;
+			}
+		}
+#endif
+	}
+
 	/* We're not holding the lock here, so don't release it */
 	(*ifp->if_input)(ifp, m_new);
 
Lines 959-964
 	return (0);
 }
 
+void
+netvsc_recv_rollup(struct hv_device *device_ctx)
+{
+#if defined(INET) || defined(INET6)
+	hn_softc_t *sc = device_get_softc(device_ctx->device);
+	struct lro_ctrl *lro = &sc->hn_lro;
+	struct lro_entry *queued;
+
+	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
+		SLIST_REMOVE_HEAD(&lro->lro_active, next);
+		tcp_lro_flush(lro, queued);
+	}
+#endif
+}
+
 /*
  * Rules for using sc->temp_unusable:
  * 1.  sc->temp_unusable can only be read or written while holding NV_LOCK()
Lines 1014-1020
 
 		/* Obtain and record requested MTU */
 		ifp->if_mtu = ifr->ifr_mtu;
- 		
+		/*
+		 * Make sure that LRO high watermark is still valid,
+		 * after MTU change (the 2*MTU limit).
+		 */
+		if (!HN_LRO_HIWAT_ISVALID(sc, sc->hn_lro_hiwat))
+			hn_set_lro_hiwat(sc, HN_LRO_HIWAT_MTULIM(ifp));
+
 		do {
 			NV_LOCK(sc);
 			if (!sc->temp_unusable) {
Lines 1052-1057
 			break;
 		}
 
+		sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
+		if (sc->hn_tx_chimney_size > sc->hn_tx_chimney_max)
+			sc->hn_tx_chimney_size = sc->hn_tx_chimney_max;
 		hn_ifinit_locked(sc);
 
 		NV_LOCK(sc);
Lines 1139-1144
 				ifp->if_capenable |= IFCAP_RXCSUM;
 			}
 		}
+		if (mask & IFCAP_LRO)
+			ifp->if_capenable ^= IFCAP_LRO;
 
 		if (mask & IFCAP_TSO4) {
 			ifp->if_capenable ^= IFCAP_TSO4;
Lines 1163-1172
 			error = 0;
 		}
 #endif
-		/* FALLTHROUGH */
+		error = EINVAL;
+		break;
 	case SIOCSIFMEDIA:
 	case SIOCGIFMEDIA:
-		error = EINVAL;
+		error = ifmedia_ioctl(ifp, ifr, &sc->hn_media, cmd);
 		break;
 	default:
 		error = ether_ioctl(ifp, cmd, data);
Lines 1284-1289
 }
 #endif
 
+#ifdef HN_LRO_HIWAT
+static int
+hn_lro_hiwat_sysctl(SYSCTL_HANDLER_ARGS)
+{
+	struct hn_softc *sc = arg1;
+	int hiwat, error;
+
+	hiwat = sc->hn_lro_hiwat;
+	error = sysctl_handle_int(oidp, &hiwat, 0, req);
+	if (error || req->newptr == NULL)
+		return error;
+
+	if (!HN_LRO_HIWAT_ISVALID(sc, hiwat))
+		return EINVAL;
+
+	if (sc->hn_lro_hiwat != hiwat)
+		hn_set_lro_hiwat(sc, hiwat);
+	return 0;
+}
+#endif	/* HN_LRO_HIWAT */
+
+static int
+hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS)
+{
+	struct hn_softc *sc = arg1;
+	int chimney_size, error;
+
+	chimney_size = sc->hn_tx_chimney_size;
+	error = sysctl_handle_int(oidp, &chimney_size, 0, req);
+	if (error || req->newptr == NULL)
+		return error;
+
+	if (chimney_size > sc->hn_tx_chimney_max || chimney_size <= 0)
+		return EINVAL;
+
+	if (sc->hn_tx_chimney_size != chimney_size)
+		sc->hn_tx_chimney_size = chimney_size;
+	return 0;
+}
+
+static int
+hn_check_iplen(const struct mbuf *m, int hoff)
+{
+	const struct ip *ip;
+	int len, iphlen, iplen;
+	const struct tcphdr *th;
+	int thoff;				/* TCP data offset */
+
+	len = hoff + sizeof(struct ip);
+
+	/* The packet must be at least the size of an IP header. */
+	if (m->m_pkthdr.len < len)
+		return IPPROTO_DONE;
+
+	/* The fixed IP header must reside completely in the first mbuf. */
+	if (m->m_len < len)
+		return IPPROTO_DONE;
+
+	ip = mtodo(m, hoff);
+
+	/* Bound check the packet's stated IP header length. */
+	iphlen = ip->ip_hl << 2;
+	if (iphlen < sizeof(struct ip))		/* minimum header length */
+		return IPPROTO_DONE;
+
+	/* The full IP header must reside completely in the one mbuf. */
+	if (m->m_len < hoff + iphlen)
+		return IPPROTO_DONE;
+
+	iplen = ntohs(ip->ip_len);
+
+	/*
+	 * Check that the amount of data in the buffers is as
+	 * at least much as the IP header would have us expect.
+	 */
+	if (m->m_pkthdr.len < hoff + iplen)
+		return IPPROTO_DONE;
+
+	/*
+	 * Ignore IP fragments.
+	 */
+	if (ntohs(ip->ip_off) & (IP_OFFMASK | IP_MF))
+		return IPPROTO_DONE;
+
+	/*
+	 * The TCP/IP or UDP/IP header must be entirely contained within
+	 * the first fragment of a packet.
+	 */
+	switch (ip->ip_p) {
+	case IPPROTO_TCP:
+		if (iplen < iphlen + sizeof(struct tcphdr))
+			return IPPROTO_DONE;
+		if (m->m_len < hoff + iphlen + sizeof(struct tcphdr))
+			return IPPROTO_DONE;
+		th = (const struct tcphdr *)((const uint8_t *)ip + iphlen);
+		thoff = th->th_off << 2;
+		if (thoff < sizeof(struct tcphdr) || thoff + iphlen > iplen)
+			return IPPROTO_DONE;
+		if (m->m_len < hoff + iphlen + thoff)
+			return IPPROTO_DONE;
+		break;
+	case IPPROTO_UDP:
+		if (iplen < iphlen + sizeof(struct udphdr))
+			return IPPROTO_DONE;
+		if (m->m_len < hoff + iphlen + sizeof(struct udphdr))
+			return IPPROTO_DONE;
+		break;
+	default:
+		if (iplen < iphlen)
+			return IPPROTO_DONE;
+		break;
+	}
+	return ip->ip_p;
+}
+
+static void
+hn_dma_map_paddr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+	bus_addr_t *paddr = arg;
+
+	if (error)
+		return;
+
+	KASSERT(nseg == 1, ("too many segments %d!", nseg));
+	*paddr = segs->ds_addr;
+}
+
+static int
+hn_create_tx_ring(struct hn_softc *sc)
+{
+	bus_dma_tag_t parent_dtag;
+	int error, i;
+
+	sc->hn_txdesc_cnt = HN_TX_DESC_CNT;
+	sc->hn_txdesc = malloc(sizeof(struct hn_txdesc) * sc->hn_txdesc_cnt,
+	    M_NETVSC, M_WAITOK | M_ZERO);
+	SLIST_INIT(&sc->hn_txlist);
+	mtx_init(&sc->hn_txlist_spin, "hn txlist", NULL, MTX_SPIN);
+
+	parent_dtag = bus_get_dma_tag(sc->hn_dev);
+
+	/* DMA tag for RNDIS messages. */
+	error = bus_dma_tag_create(parent_dtag, /* parent */
+	    HN_RNDIS_MSG_ALIGN,		/* alignment */
+	    HN_RNDIS_MSG_BOUNDARY,	/* boundary */
+	    BUS_SPACE_MAXADDR,		/* lowaddr */
+	    BUS_SPACE_MAXADDR,		/* highaddr */
+	    NULL, NULL,			/* filter, filterarg */
+	    HN_RNDIS_MSG_LEN,		/* maxsize */
+	    1,				/* nsegments */
+	    HN_RNDIS_MSG_LEN,		/* maxsegsize */
+	    0,				/* flags */
+	    NULL,			/* lockfunc */
+	    NULL,			/* lockfuncarg */
+	    &sc->hn_tx_rndis_dtag);
+	if (error) {
+		device_printf(sc->hn_dev, "failed to create rndis dmatag\n");
+		return error;
+	}
+
+	/* DMA tag for data. */
+	error = bus_dma_tag_create(parent_dtag, /* parent */
+	    1,				/* alignment */
+	    HN_TX_DATA_BOUNDARY,	/* boundary */
+	    BUS_SPACE_MAXADDR,		/* lowaddr */
+	    BUS_SPACE_MAXADDR,		/* highaddr */
+	    NULL, NULL,			/* filter, filterarg */
+	    HN_TX_DATA_MAXSIZE,		/* maxsize */
+	    HN_TX_DATA_SEGCNT_MAX,	/* nsegments */
+	    HN_TX_DATA_SEGSIZE,		/* maxsegsize */
+	    0,				/* flags */
+	    NULL,			/* lockfunc */
+	    NULL,			/* lockfuncarg */
+	    &sc->hn_tx_data_dtag);
+	if (error) {
+		device_printf(sc->hn_dev, "failed to create data dmatag\n");
+		return error;
+	}
+
+	for (i = 0; i < sc->hn_txdesc_cnt; ++i) {
+		struct hn_txdesc *txd = &sc->hn_txdesc[i];
+
+		txd->sc = sc;
+
+		/*
+		 * Allocate and load RNDIS messages.
+		 */
+        	error = bus_dmamem_alloc(sc->hn_tx_rndis_dtag,
+		    (void **)&txd->rndis_msg,
+		    BUS_DMA_WAITOK | BUS_DMA_COHERENT,
+		    &txd->rndis_msg_dmap);
+		if (error) {
+			device_printf(sc->hn_dev,
+			    "failed to allocate rndis_msg, %d\n", i);
+			return error;
+		}
+
+		error = bus_dmamap_load(sc->hn_tx_rndis_dtag,
+		    txd->rndis_msg_dmap,
+		    txd->rndis_msg, HN_RNDIS_MSG_LEN,
+		    hn_dma_map_paddr, &txd->rndis_msg_paddr,
+		    BUS_DMA_NOWAIT);
+		if (error) {
+			device_printf(sc->hn_dev,
+			    "failed to load rndis_msg, %d\n", i);
+			bus_dmamem_free(sc->hn_tx_rndis_dtag,
+			    txd->rndis_msg, txd->rndis_msg_dmap);
+			return error;
+		}
+
+		/* DMA map for TX data. */
+		error = bus_dmamap_create(sc->hn_tx_data_dtag, 0,
+		    &txd->data_dmap);
+		if (error) {
+			device_printf(sc->hn_dev,
+			    "failed to allocate tx data dmamap\n");
+			bus_dmamap_unload(sc->hn_tx_rndis_dtag,
+			    txd->rndis_msg_dmap);
+			bus_dmamem_free(sc->hn_tx_rndis_dtag,
+			    txd->rndis_msg, txd->rndis_msg_dmap);
+			return error;
+		}
+
+		/* All set, put it to list */
+		txd->flags |= HN_TXD_FLAG_ONLIST;
+		SLIST_INSERT_HEAD(&sc->hn_txlist, txd, link);
+	}
+	sc->hn_txdesc_avail = sc->hn_txdesc_cnt;
+
+	return 0;
+}
+
+static void
+hn_destroy_tx_ring(struct hn_softc *sc)
+{
+	struct hn_txdesc *txd;
+
+	while ((txd = SLIST_FIRST(&sc->hn_txlist)) != NULL) {
+		KASSERT(txd->m == NULL, ("still has mbuf installed"));
+		KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0,
+		    ("still dma mapped"));
+		SLIST_REMOVE_HEAD(&sc->hn_txlist, link);
+
+		bus_dmamap_unload(sc->hn_tx_rndis_dtag,
+		    txd->rndis_msg_dmap);
+		bus_dmamem_free(sc->hn_tx_rndis_dtag,
+		    txd->rndis_msg, txd->rndis_msg_dmap);
+
+		bus_dmamap_destroy(sc->hn_tx_data_dtag, txd->data_dmap);
+	}
+
+	if (sc->hn_tx_data_dtag != NULL)
+		bus_dma_tag_destroy(sc->hn_tx_data_dtag);
+	if (sc->hn_tx_rndis_dtag != NULL)
+		bus_dma_tag_destroy(sc->hn_tx_rndis_dtag);
+	free(sc->hn_txdesc, M_NETVSC);
+	mtx_destroy(&sc->hn_txlist_spin);
+}
+
 static device_method_t netvsc_methods[] = {
         /* Device interface */
         DEVMETHOD(device_probe,         netvsc_probe),
Lines 1305-1310
 DRIVER_MODULE(hn, vmbus, netvsc_driver, netvsc_devclass, 0, 0);
 MODULE_VERSION(hn, 1);
 MODULE_DEPEND(hn, vmbus, 1, 1, 1);
-SYSINIT(netvsc_initx, SI_SUB_KTHREAD_IDLE, SI_ORDER_MIDDLE + 1, netvsc_init,
-     NULL);
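
[Reviewer note] The heart of this attachment is the reworked transmit path: instead of malloc'ing a packet buffer per send and walking the mbuf chain with vtophys(), hn_start_locked() now takes a refcounted descriptor from the freelist, busdma-loads the mbuf (with an m_collapse() retry on EFBIG), and lets netvsc_xmit_completion()/netvsc_channel_rollup() recycle it. Reduced to its control flow, using the patch's own helpers (hn_xmit_one() is a hypothetical name for this sketch, and all error paths are dropped):

	static void
	hn_xmit_one(struct hn_softc *sc, struct hv_device *dev, struct mbuf *m)
	{
		struct hn_txdesc *txd;
		netvsc_packet *packet;

		txd = hn_txdesc_get(sc);	/* freelist pop, refs = 1 */
		if (txd == NULL)
			return;			/* real code requeues + sets OACTIVE */

		packet = &txd->netvsc_pkt;
		/* ... build RNDIS message in txd->rndis_msg, DMA-load m ... */
		txd->m = m;

		hn_txdesc_hold(txd);		/* keep m alive for the BPF tap */
		if (hv_nv_on_send(dev, packet) == 0)
			ETHER_BPF_MTAP(sc->hn_ifp, m);
		hn_txdesc_put(sc, txd);		/* completion drops the last ref */
	}

The hold/put pair replaces the old m_copypacket() workaround for the race its deleted comment described: the send completion may fire and free the descriptor before ETHER_BPF_MTAP() runs, so the sender now keeps its own reference across the tap.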
(-)sys/dev/hyperv/netvsc/hv_rndis.h (+2 lines)
Lines 1049-1054
 int netvsc_recv(struct hv_device *device_ctx, 
     netvsc_packet *packet, 
     rndis_tcp_ip_csum_info *csum_info);
+void netvsc_recv_rollup(struct hv_device *device_ctx);
+void netvsc_channel_rollup(struct hv_device *device_ctx);
 
 void* hv_set_rppi_data(rndis_msg *rndis_mesg,
     uint32_t rppi_size,
(-)sys/dev/hyperv/netvsc/hv_rndis_filter.c (+29 lines)
Lines 963-965
 	request->halt_complete_flag = 1;
 }
 
+/*
+ * RNDIS filter when "all" reception is done
+ */
+void
+hv_rf_receive_rollup(netvsc_dev *net_dev)
+{
+	rndis_device *rndis_dev;
+
+	rndis_dev = (rndis_device *)net_dev->extension;
+	netvsc_recv_rollup(rndis_dev->net_dev->dev);
+}
+
+void
+hv_rf_channel_rollup(netvsc_dev *net_dev)
+{
+	rndis_device *rndis_dev;
+
+	rndis_dev = (rndis_device *)net_dev->extension;
+
+	/*
+	 * This could be called pretty early, so we need
+	 * to make sure everything has been setup.
+	 */
+	if (rndis_dev == NULL ||
+	    rndis_dev->net_dev == NULL ||
+	    rndis_dev->net_dev->dev == NULL)
+		return;
+	netvsc_channel_rollup(rndis_dev->net_dev->dev);
+}
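
[Reviewer note] Both new functions are thin forwarders from the RNDIS filter layer into the netvsc driver: hv_rf_receive_rollup() triggers netvsc_recv_rollup() (flush of the LRO queue built up during a burst of receives), and hv_rf_channel_rollup() triggers netvsc_channel_rollup() (restart of the transmit queue once completions have been processed), guarded because the channel callback can run before attach finishes. Their call sites are the hv_net_vsc.c hunks in the first attachment; the surrounding ring-drain loop is not part of this diff, so the sketch below is an assumption (next_ring_packet() is hypothetical shorthand):

	static void
	channel_callback_sketch(struct hv_device *device, netvsc_dev *net_dev)
	{
		netvsc_packet *pkt;

		while ((pkt = next_ring_packet(device)) != NULL)
			hv_rf_on_receive(net_dev, device, pkt);

		/* Once per callback, not once per packet: */
		hv_rf_channel_rollup(net_dev);
	}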
(-)sys/dev/hyperv/netvsc/hv_rndis_filter.h (+2 lines)
Lines 98-103
 
 int hv_rf_on_receive(netvsc_dev *net_dev,
     struct hv_device *device, netvsc_packet *pkt);
+void hv_rf_receive_rollup(netvsc_dev *net_dev);
+void hv_rf_channel_rollup(netvsc_dev *net_dev);
 int hv_rf_on_device_add(struct hv_device *device, void *additl_info);
 int hv_rf_on_device_remove(struct hv_device *device, boolean_t destroy_channel);
 int hv_rf_on_open(struct hv_device *device);
(-)sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c (-9 / +88 lines)
Lines 1525-1536
 {
 	struct hv_sgl_node *sgl_node = NULL;
 
-	sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.in_use_sgl_list);
-	LIST_REMOVE(sgl_node, link);
-	if (NULL == sgl_node) {
+	if (LIST_EMPTY(&g_hv_sgl_page_pool.in_use_sgl_list)) {
 		printf("storvsc error: not enough in use sgl\n");
 		return;
 	}
+	sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.in_use_sgl_list);
+	LIST_REMOVE(sgl_node, link);
 	sgl_node->sgl_data = sgl;
 	LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list, sgl_node, link);
 }
Lines 1556-1567
 	struct hv_sgl_node *sgl_node = NULL;	
 
 	/* get struct sglist from free_sgl_list */
-	sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
-	LIST_REMOVE(sgl_node, link);
-	if (NULL == sgl_node) {
+	if (LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
 		printf("storvsc error: not enough free sgl\n");
 		return NULL;
 	}
+	sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
+	LIST_REMOVE(sgl_node, link);
 	bounce_sgl = sgl_node->sgl_data;
 	LIST_INSERT_HEAD(&g_hv_sgl_page_pool.in_use_sgl_list, sgl_node, link);
 
Lines 1924-1929
 }
 
 /**
+ * Modified based on scsi_print_inquiry which is responsible to
+ * print the detail information for scsi_inquiry_data
+ * return 1 if it is valid, 0 otherwise.
+ */
+static inline int
+is_scsi_valid(const struct scsi_inquiry_data *inq_data)
+{
+	u_int8_t type;
+	char vendor[16], product[48], revision[16];
+	/**
+	 * Check device type and qualifier
+	 */
+	if (!(SID_QUAL_IS_VENDOR_UNIQUE(inq_data) ||
+	    SID_QUAL(inq_data) == SID_QUAL_LU_CONNECTED)) {
+		return (0);
+	}
+
+	type = SID_TYPE(inq_data);
+	switch (type) {
+	case T_DIRECT:
+	case T_SEQUENTIAL:
+	case T_PRINTER:
+	case T_PROCESSOR:
+	case T_WORM:
+	case T_CDROM:
+	case T_SCANNER:
+	case T_OPTICAL:
+	case T_CHANGER:
+	case T_COMM:
+	case T_STORARRAY:
+	case T_ENCLOSURE:
+	case T_RBC:
+	case T_OCRW:
+	case T_OSD:
+	case T_ADC:
+		break;
+	case T_NODEVICE:
+		return (0);
+	default:
+		return (0);
+	}
+	/**
+	 * Check vendor, product, and revision
+	 */
+	cam_strvis(vendor, inq_data->vendor, sizeof(inq_data->vendor),
+		   sizeof(vendor));
+	cam_strvis(product, inq_data->product, sizeof(inq_data->product),
+		   sizeof(product));
+	cam_strvis(revision, inq_data->revision, sizeof(inq_data->revision),
+		   sizeof(revision));
+	if (strlen(vendor) == 0  ||
+	    strlen(product) == 0 ||
+	    strlen(revision) == 0) {
+		return (0);
+	}
+	return (1);
+}
+/**
  * @brief completion function before returning to CAM
  *
  * I/O process has been completed and the result needs
Lines 1992-2003
 
 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
+	/**
+	 * check whether the SCSI device is valid for INQUIRY cmd.
+	 * windows 10 and windows 2016 sends wrong information
+	 * to VM for unknown reason. That is why there is is_scsi_valid
+	 * check here.
+	 */
+        const struct scsi_generic *cmd;
+        cmd = (const struct scsi_generic *)((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
+                csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes);
+
 	if (vm_srb->scsi_status == SCSI_STATUS_OK) {
-		ccb->ccb_h.status |= CAM_REQ_CMP;
-	 } else {
+		if (cmd->opcode == INQUIRY &&
+		     is_scsi_valid((struct scsi_inquiry_data *)csio->data_ptr) == 0) {
+			ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
+			if (bootverbose) {
+				mtx_lock(&sc->hs_lock);
+				xpt_print(ccb->ccb_h.path, 
+					"storvsc uninstalled device\n");
+				mtx_unlock(&sc->hs_lock);
+			}
+		} else {
+			ccb->ccb_h.status |= CAM_REQ_CMP;
+		}
+	} else {
 		mtx_lock(&sc->hs_lock);
 		xpt_print(ccb->ccb_h.path,
-			"srovsc scsi_status = %d\n",
+			"storvsc scsi_status = %d\n",
 			vm_srb->scsi_status);
 		mtx_unlock(&sc->hs_lock);
 		ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;