
b/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c (-3 / +82 lines)
/*-
 * Copyright (c) 2009-2012 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * StorVSC driver for Hyper-V.  This driver presents a SCSI HBA interface
 * to the Common Access Method (CAM) layer.  CAM control blocks (CCBs) are
 * converted into VSCSI protocol messages which are delivered to the parent
 * partition StorVSP driver over the Hyper-V VMBUS.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/condvar.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/bus.h>
#include <sys/mutex.h>
#include <sys/callout.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <sys/sema.h>
#include <sys/sglist.h>
#include <machine/bus.h>
#include <sys/bus_dma.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/hyperv/include/hyperv.h>
#include "hv_vstorage.h"

#define STORVSC_RINGBUFFER_SIZE		(20*PAGE_SIZE)
#define STORVSC_MAX_LUNS_PER_TARGET	(64)
#define STORVSC_MAX_IO_REQUESTS		(STORVSC_MAX_LUNS_PER_TARGET * 2)
#define BLKVSC_MAX_IDE_DISKS_PER_TARGET	(1)
#define BLKVSC_MAX_IO_REQUESTS		STORVSC_MAX_IO_REQUESTS
#define STORVSC_MAX_TARGETS		(2)

#define STORVSC_WIN7_MAJOR 4
#define STORVSC_WIN7_MINOR 2

#define STORVSC_WIN8_MAJOR 5
#define STORVSC_WIN8_MINOR 1
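
/*
 * On-the-wire size of a vstor packet.  vmscsi_size_delta is non-zero only
 * when talking to a pre-Win8 host (see storvsc_probe()), in which case the
 * Win8-only extension fields at the tail of the request structure must not
 * be sent.
 */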
#define VSTOR_PKT_SIZE	(sizeof(struct vstor_packet) - vmscsi_size_delta)

#define HV_ALIGN(x, a) roundup2(x, a)

struct storvsc_softc;

struct hv_sgl_node {
	LIST_ENTRY(hv_sgl_node) link;
	struct sglist *sgl_data;
};

struct hv_sgl_page_pool {
	LIST_HEAD(, hv_sgl_node) in_use_sgl_list;
	LIST_HEAD(, hv_sgl_node) free_sgl_list;
	boolean_t                is_init;
} g_hv_sgl_page_pool;
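
/*
 * The pool above keeps a free list of pre-allocated sglists that serve as
 * bounce buffers: when a CCB's data segments are not page aligned, the I/O
 * path copies them into a page-aligned bounce_sgl (see
 * storvsc_copy_sgl_to_bounce_buf() below) before handing pages to the host.
 */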

#define STORVSC_MAX_SG_PAGE_CNT	(STORVSC_MAX_IO_REQUESTS * HV_MAX_MULTIPAGE_BUFFER_COUNT)

enum storvsc_request_type {
	WRITE_TYPE,
	READ_TYPE,
	UNKNOWN_TYPE
};

struct hv_storvsc_request {
	LIST_ENTRY(hv_storvsc_request) link;
	struct vstor_packet	vstor_packet;
	hv_vmbus_multipage_buffer data_buf;
	void *sense_data;
	uint8_t sense_info_len;
	uint8_t retries;
	union ccb *ccb;
	struct storvsc_softc *softc;
	struct callout callout;
	struct sema synch_sema;	/* synchronize the request/response if needed */
	struct sglist *bounce_sgl;
	unsigned int bounce_sgl_count;
	uint64_t not_aligned_seg_bits;
};
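
/*
 * bounce_sgl/bounce_sgl_count describe the bounce buffer (if any) backing
 * a request; not_aligned_seg_bits evidently records which of the original
 * segments were not page aligned (one bit per segment) so that only those
 * are copied back on completion; see the storvsc_copy_*_bounce_buf helpers.
 */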

struct storvsc_softc {
	struct hv_device		*hs_dev;
	LIST_HEAD(, hv_storvsc_request)	hs_free_list;
	struct mtx			hs_lock;
	struct storvsc_driver_props	*hs_drv_props;
	int				hs_unit;
	uint32_t			hs_frozen;
	struct cam_sim			*hs_sim;
	struct cam_path			*hs_path;
	uint32_t			hs_num_out_reqs;
	boolean_t			hs_destroy;
	boolean_t			hs_drain_notify;
	boolean_t			hs_open_multi_channel;
	struct sema			hs_drain_sema;
	struct hv_storvsc_request	hs_init_req;
	struct hv_storvsc_request	hs_reset_req;
};

/**
 * HyperV storvsc timeout testing cases:
 * a. IO returned after first timeout;
 * b. IO returned after second timeout and queue freeze;
 * c. IO returned while timer handler is running
 * The first can be tested by "sg_senddiag -vv /dev/daX",
 * and the second and third can be done by
 * "sg_wr_mode -v -p 08 -c 0,1a -m 0,ff /dev/daX".
 */
#define HVS_TIMEOUT_TEST 0

/*
 * Bus/adapter reset functionality on the Hyper-V host is
 * buggy and is disabled until it can be further tested.
 */
#define HVS_HOST_RESET 0

struct storvsc_driver_props {
	char		*drv_name;
	char		*drv_desc;
	uint8_t		drv_max_luns_per_target;
	uint8_t		drv_max_ios_per_target;
	uint32_t	drv_ringbuffer_size;
};

enum hv_storage_type {
	DRIVER_BLKVSC,
	DRIVER_STORVSC,
	DRIVER_UNKNOWN
};

#define HS_MAX_ADAPTERS 10

#define HV_STORAGE_SUPPORTS_MULTI_CHANNEL 0x1

/* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
static const hv_guid gStorVscDeviceType = {
	.data = {0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
		 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f}
};

/* {32412632-86cb-44a2-9b5c-50d1417354f5} */
static const hv_guid gBlkVscDeviceType = {
	.data = {0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
		 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5}
};

static struct storvsc_driver_props g_drv_props_table[] = {
	{"blkvsc", "Hyper-V IDE Storage Interface",
	 BLKVSC_MAX_IDE_DISKS_PER_TARGET, BLKVSC_MAX_IO_REQUESTS,
	 STORVSC_RINGBUFFER_SIZE},
	{"storvsc", "Hyper-V SCSI Storage Interface",
	 STORVSC_MAX_LUNS_PER_TARGET, STORVSC_MAX_IO_REQUESTS,
	 STORVSC_RINGBUFFER_SIZE}
};
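
/*
 * The two rows above line up with enum hv_storage_type (DRIVER_BLKVSC,
 * DRIVER_STORVSC); presumably the attach path indexes this table with the
 * value returned by storvsc_get_storage_type().
 */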

/*
 * Sense buffer size changed in win8; have a run-time
 * variable to track the size we should use.
 */
static int sense_buffer_size;

/*
 * The size of the vmscsi_request has changed in win8. The
 * additional size is for the newly added elements in the
 * structure. These elements are valid only when we are talking
 * to a win8 host.
 * Track the correct size we need to apply.
 */
static int vmscsi_size_delta;

static int storvsc_current_major;
static int storvsc_current_minor;

/* static functions */
static int storvsc_probe(device_t dev);
static int storvsc_attach(device_t dev);
static int storvsc_detach(device_t dev);
static void storvsc_poll(struct cam_sim *sim);
static void storvsc_action(struct cam_sim *sim, union ccb *ccb);
static int create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp);
static void storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp);
static enum hv_storage_type storvsc_get_storage_type(device_t dev);
static void hv_storvsc_rescan_target(struct storvsc_softc *sc);
static void hv_storvsc_on_channel_callback(void *context);
static void hv_storvsc_on_iocompletion(struct storvsc_softc *sc,
					struct vstor_packet *vstor_packet,
					struct hv_storvsc_request *request);
static int hv_storvsc_connect_vsp(struct hv_device *device);
static void storvsc_io_done(struct hv_storvsc_request *reqp);
static void storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl,
				bus_dma_segment_t *orig_sgl,
				unsigned int orig_sgl_count,
				uint64_t seg_bits);
void storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl,
				unsigned int dest_sgl_count,
				struct sglist *src_sgl,
				uint64_t seg_bits);

static device_method_t storvsc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		storvsc_probe),
	DEVMETHOD(device_attach,	storvsc_attach),
	DEVMETHOD(device_detach,	storvsc_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD_END
};

static driver_t storvsc_driver = {
	"storvsc", storvsc_methods, sizeof(struct storvsc_softc),
};

static devclass_t storvsc_devclass;
DRIVER_MODULE(storvsc, vmbus, storvsc_driver, storvsc_devclass, 0, 0);
MODULE_VERSION(storvsc, 1);
MODULE_DEPEND(storvsc, vmbus, 1, 1, 1);

/**
 * The host is capable of sending messages to us that are
 * completely unsolicited. So, we need to address the race
 * condition where we may be in the process of unloading the
 * driver when the host may send us an unsolicited message.
 * We address this issue by implementing a sequentially
 * consistent protocol:
 *
 * 1. Channel callback is invoked while holding the channel lock
 *    and an unloading driver will reset the channel callback under
 *    the protection of this channel lock.
 *
 * 2. To ensure bounded wait time for unloading a driver, we don't
 *    permit outgoing traffic once the device is marked as being
 *    destroyed.
 *
 * 3. Once the device is marked as being destroyed, we only
 *    permit incoming traffic to properly account for
 *    packets already sent out.
 */
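/*
 * Senders therefore call get_stor_device() with outbound == TRUE, while the
 * receive path (hv_storvsc_on_channel_callback) passes FALSE so that
 * completions for packets already in flight can still be processed while
 * the device is being torn down.
 */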
static inline struct storvsc_softc *
get_stor_device(struct hv_device *device,
				boolean_t outbound)
{
	struct storvsc_softc *sc;

	sc = device_get_softc(device->device);
	if (sc == NULL) {
		return NULL;
	}

	if (outbound) {
		/*
		 * Here we permit outgoing I/O only
		 * if the device is not being destroyed.
		 */
		if (sc->hs_destroy) {
			sc = NULL;
		}
	} else {
		/*
		 * Inbound case; if the device is being destroyed,
		 * only permit traffic that accounts for
		 * messages already sent out.
		 */
		if (sc->hs_destroy && (sc->hs_num_out_reqs == 0)) {
			sc = NULL;
		}
	}
	return sc;
}

/**
 * @brief Callback handler, invoked when a multi-channel offer is received
 *
 * @param context  the new multi-channel
 */
static void
storvsc_handle_sc_creation(void *context)
{
	hv_vmbus_channel *new_channel;
	struct hv_device *device;
	struct storvsc_softc *sc;
	struct vmstor_chan_props props;
	int ret = 0;

	new_channel = (hv_vmbus_channel *)context;
	device = new_channel->primary_channel->device;
	sc = get_stor_device(device, TRUE);
	if (sc == NULL)
		return;

	if (FALSE == sc->hs_open_multi_channel)
		return;

	memset(&props, 0, sizeof(props));
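
	/*
	 * Open the new subchannel with the same ring buffer size and
	 * channel callback as the primary channel opened in
	 * hv_storvsc_connect_vsp(); the zeroed props structure is passed
	 * as the open-channel user data.
	 */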
	ret = hv_vmbus_channel_open(new_channel,
	    sc->hs_drv_props->drv_ringbuffer_size,
	    sc->hs_drv_props->drv_ringbuffer_size,
	    (void *)&props,
	    sizeof(struct vmstor_chan_props),
	    hv_storvsc_on_channel_callback,
	    new_channel);

	return;
}

/**
 * @brief Send multi-channel creation request to host
 *
 * @param dev  a Hyper-V device pointer
 * @param max_chans  the max channels supported by vmbus
 */
static void
storvsc_send_multichannel_request(struct hv_device *dev, int max_chans)
{
	struct storvsc_softc *sc;
	struct hv_storvsc_request *request;
	struct vstor_packet *vstor_packet;
	int request_channels_cnt = 0;
	int ret;

	/* get the number of multichannels to request from the host */
	request_channels_cnt = MIN(max_chans, mp_ncpus);

	sc = get_stor_device(dev, TRUE);
	if (sc == NULL) {
		printf("Storvsc_error: get sc failed while sending "
		    "multichannel request\n");
		return;
	}

	request = &sc->hs_init_req;

	/* Establish a handler for multi-channel */
	dev->channel->sc_creation_callback = storvsc_handle_sc_creation;

	/* request the host to create multi-channel */
	memset(request, 0, sizeof(struct hv_storvsc_request));

	sema_init(&request->synch_sema, 0, ("stor_synch_sema"));

	vstor_packet = &request->vstor_packet;

	vstor_packet->operation = VSTOR_OPERATION_CREATE_MULTI_CHANNELS;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
	vstor_packet->u.multi_channels_cnt = request_channels_cnt;

	ret = hv_vmbus_channel_send_packet(
	    dev->channel,
	    vstor_packet,
	    VSTOR_PKT_SIZE,
	    (uint64_t)(uintptr_t)request,
	    HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
	    HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	/* wait for 5 seconds */
	ret = sema_timedwait(&request->synch_sema, 5 * hz);
	if (ret != 0) {
		printf("Storvsc_error: create multi-channel timeout, %d\n",
		    ret);
		return;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
	    vstor_packet->status != 0) {
		printf("Storvsc_error: create multi-channel invalid operation "
		    "(%d) or status (%u)\n",
		    vstor_packet->operation, vstor_packet->status);
		return;
	}

	sc->hs_open_multi_channel = TRUE;

	if (bootverbose)
		printf("Storvsc created multi-channel successfully!\n");
}
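
/*
 * All of the VSP handshake requests in this file follow the same
 * synchronous pattern, sketched below with the names used above:
 *
 *	vstor_packet->operation = <some VSTOR_OPERATION_*>;
 *	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 *	hv_vmbus_channel_send_packet(..., (uint64_t)(uintptr_t)request, ...);
 *	ret = sema_timedwait(&request->synch_sema, 5 * hz);
 *	then check operation == VSTOR_OPERATION_COMPLETEIO && status == 0
 *
 * hv_storvsc_on_channel_callback() recognizes hs_init_req/hs_reset_req by
 * the echoed request id and posts synch_sema when the host replies.
 * hv_storvsc_channel_init() below runs this pattern four times:
 * BEGININITIALIZATION, QUERYPROTOCOLVERSION, QUERYPROPERTIES and
 * ENDINITIALIZATION.
 */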
/**
 * @brief initialize channel connection to parent partition
 *
 * @param dev  a Hyper-V device pointer
 * @returns  0 on success, non-zero error on failure
 */
static int
hv_storvsc_channel_init(struct hv_device *dev)
{
	int ret = 0;
	struct hv_storvsc_request *request;
	struct vstor_packet *vstor_packet;
	struct storvsc_softc *sc;
	uint16_t max_chans = 0;
	boolean_t support_multichannel = FALSE;

	sc = get_stor_device(dev, TRUE);
	if (sc == NULL)
		return (ENODEV);

	request = &sc->hs_init_req;
	memset(request, 0, sizeof(struct hv_storvsc_request));
	vstor_packet = &request->vstor_packet;
	request->softc = sc;

	/*
	 * Initiate the vsc/vsp initialization protocol on the open channel
	 */
	sema_init(&request->synch_sema, 0, ("stor_synch_sema"));

	vstor_packet->operation = VSTOR_OPERATION_BEGININITIALIZATION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = hv_vmbus_channel_send_packet(
			dev->channel,
			vstor_packet,
			VSTOR_PKT_SIZE,
			(uint64_t)(uintptr_t)request,
			HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
			HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		goto cleanup;

	/* wait 5 seconds */
	ret = sema_timedwait(&request->synch_sema, 5 * hz);
	if (ret != 0)
		goto cleanup;

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
	    vstor_packet->status != 0) {
		goto cleanup;
	}

	/* reuse the packet for the supported version range query */
	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_QUERYPROTOCOLVERSION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	vstor_packet->u.version.major_minor =
	    VMSTOR_PROTOCOL_VERSION(storvsc_current_major, storvsc_current_minor);

	/* revision is only significant for Windows guests */
	vstor_packet->u.version.revision = 0;

	ret = hv_vmbus_channel_send_packet(
			dev->channel,
			vstor_packet,
			VSTOR_PKT_SIZE,
			(uint64_t)(uintptr_t)request,
			HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
			HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		goto cleanup;

	/* wait 5 seconds */
	ret = sema_timedwait(&request->synch_sema, 5 * hz);

	if (ret)
		goto cleanup;

	/* TODO: Check returned version */
	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
	    vstor_packet->status != 0)
		goto cleanup;

	/*
	 * Query channel properties
	 */
	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_QUERYPROPERTIES;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = hv_vmbus_channel_send_packet(
				dev->channel,
				vstor_packet,
				VSTOR_PKT_SIZE,
				(uint64_t)(uintptr_t)request,
				HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
				HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		goto cleanup;

	/* wait 5 seconds */
	ret = sema_timedwait(&request->synch_sema, 5 * hz);

	if (ret != 0)
		goto cleanup;

	/* TODO: Check returned properties */
	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
	    vstor_packet->status != 0) {
		goto cleanup;
	}

	/* the multi-channel feature is supported on WIN8 and above */
	max_chans = vstor_packet->u.chan_props.max_channel_cnt;
	if ((hv_vmbus_protocal_version != HV_VMBUS_VERSION_WIN7) &&
	    (hv_vmbus_protocal_version != HV_VMBUS_VERSION_WS2008) &&
	    (vstor_packet->u.chan_props.flags &
	     HV_STORAGE_SUPPORTS_MULTI_CHANNEL)) {
		support_multichannel = TRUE;
	}

	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_ENDINITIALIZATION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = hv_vmbus_channel_send_packet(
			dev->channel,
			vstor_packet,
			VSTOR_PKT_SIZE,
			(uint64_t)(uintptr_t)request,
			HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
			HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0) {
		goto cleanup;
	}

	/* wait 5 seconds */
	ret = sema_timedwait(&request->synch_sema, 5 * hz);

	if (ret != 0)
		goto cleanup;

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
	    vstor_packet->status != 0)
		goto cleanup;

	/*
	 * If multi-channel is supported, send a multichannel create
	 * request to the host.
	 */
	if (support_multichannel)
		storvsc_send_multichannel_request(dev, max_chans);

cleanup:
	sema_destroy(&request->synch_sema);
	return (ret);
}

/**
 * @brief Open channel connection to parent partition StorVSP driver
 *
 * Open and initialize channel connection to parent partition StorVSP driver.
 *
 * @param dev  pointer to a Hyper-V device
 * @returns 0 on success, non-zero error on failure
 */
static int
hv_storvsc_connect_vsp(struct hv_device *dev)
{
	int ret = 0;
	struct vmstor_chan_props props;
	struct storvsc_softc *sc;

	sc = device_get_softc(dev->device);

	memset(&props, 0, sizeof(struct vmstor_chan_props));

	/*
	 * Open the channel
	 */
	ret = hv_vmbus_channel_open(
		dev->channel,
		sc->hs_drv_props->drv_ringbuffer_size,
		sc->hs_drv_props->drv_ringbuffer_size,
		(void *)&props,
		sizeof(struct vmstor_chan_props),
		hv_storvsc_on_channel_callback,
		dev->channel);

	if (ret != 0) {
		return ret;
	}

	ret = hv_storvsc_channel_init(dev);

	return (ret);
}

#if HVS_HOST_RESET
static int
hv_storvsc_host_reset(struct hv_device *dev)
{
	int ret = 0;
	struct storvsc_softc *sc;

	struct hv_storvsc_request *request;
	struct vstor_packet *vstor_packet;

	sc = get_stor_device(dev, TRUE);
	if (sc == NULL) {
		return ENODEV;
	}

	request = &sc->hs_reset_req;
	request->softc = sc;
	vstor_packet = &request->vstor_packet;

	sema_init(&request->synch_sema, 0, "stor synch sema");

	vstor_packet->operation = VSTOR_OPERATION_RESETBUS;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = hv_vmbus_channel_send_packet(dev->channel,
			vstor_packet,
			VSTOR_PKT_SIZE,
			(uint64_t)(uintptr_t)&sc->hs_reset_req,
			HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
			HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0) {
		goto cleanup;
	}

	ret = sema_timedwait(&request->synch_sema, 5 * hz); /* KYS 5 seconds */

	if (ret) {
		goto cleanup;
	}

	/*
	 * At this point, all outstanding requests in the adapter
	 * should have been flushed out and returned to us
	 */

cleanup:
	sema_destroy(&request->synch_sema);
	return (ret);
}
#endif /* HVS_HOST_RESET */

/**
 * @brief Function to initiate an I/O request
 *
 * @param device Hyper-V device pointer
 * @param request pointer to a request structure
 * @returns 0 on success, non-zero error on failure
 */
static int
hv_storvsc_io_request(struct hv_device *device,
					  struct hv_storvsc_request *request)
{
	struct storvsc_softc *sc;
	struct vstor_packet *vstor_packet = &request->vstor_packet;
	struct hv_vmbus_channel *outgoing_channel = NULL;
	int ret = 0;

	sc = get_stor_device(device, TRUE);

	if (sc == NULL) {
		return ENODEV;
	}

	vstor_packet->flags |= REQUEST_COMPLETION_FLAG;

	vstor_packet->u.vm_srb.length = VSTOR_PKT_SIZE;

	vstor_packet->u.vm_srb.sense_info_len = sense_buffer_size;

	vstor_packet->u.vm_srb.transfer_len = request->data_buf.length;

	vstor_packet->operation = VSTOR_OPERATION_EXECUTESRB;
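
	/*
	 * Spread I/O across the channels opened by
	 * storvsc_send_multichannel_request(); how
	 * vmbus_select_outgoing_channel() picks among the primary channel
	 * and its subchannels is left to the vmbus layer.
	 */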
	outgoing_channel = vmbus_select_outgoing_channel(device->channel);

	mtx_unlock(&request->softc->hs_lock);
	if (request->data_buf.length) {
		ret = hv_vmbus_channel_send_packet_multipagebuffer(
				outgoing_channel,
				&request->data_buf,
				vstor_packet,
				VSTOR_PKT_SIZE,
				(uint64_t)(uintptr_t)request);

	} else {
		ret = hv_vmbus_channel_send_packet(
			outgoing_channel,
			vstor_packet,
			VSTOR_PKT_SIZE,
			(uint64_t)(uintptr_t)request,
			HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
			HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}
	mtx_lock(&request->softc->hs_lock);

	if (ret != 0) {
		printf("Unable to send packet %p ret %d\n", vstor_packet, ret);
	} else {
		atomic_add_int(&sc->hs_num_out_reqs, 1);
	}

	return (ret);
}


/**
 * Process an IO_COMPLETION_OPERATION and hand the
 * result to the CAM layer for upper-layer
 * processing.
 */
static void
hv_storvsc_on_iocompletion(struct storvsc_softc *sc,
			   struct vstor_packet *vstor_packet,
			   struct hv_storvsc_request *request)
{
	struct vmscsi_req *vm_srb;

	vm_srb = &vstor_packet->u.vm_srb;

	if (((vm_srb->scsi_status & 0xFF) == SCSI_STATUS_CHECK_COND) &&
			(vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)) {
		/* Autosense data available */

		KASSERT(vm_srb->sense_info_len <= request->sense_info_len,
				("vm_srb->sense_info_len <= "
				 "request->sense_info_len"));

		memcpy(request->sense_data, vm_srb->u.sense_data,
			vm_srb->sense_info_len);

		request->sense_info_len = vm_srb->sense_info_len;
	}
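
	/*
	 * Drop the outstanding-request count once the CCB has been handed
	 * back to CAM.  If a drain is in progress (hs_drain_notify is set,
	 * presumably by the detach path), wake the waiter when the last
	 * request completes.
	 */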
784
	/* Complete request by passing to the CAM layer */
784
	/* Complete request by passing to the CAM layer */
785
	storvsc_io_done(request);
785
	storvsc_io_done(request);
786
	atomic_subtract_int(&sc->hs_num_out_reqs, 1);
786
	atomic_subtract_int(&sc->hs_num_out_reqs, 1);
787
	if (sc->hs_drain_notify && (sc->hs_num_out_reqs == 0)) {
787
	if (sc->hs_drain_notify && (sc->hs_num_out_reqs == 0)) {
788
		sema_post(&sc->hs_drain_sema);
788
		sema_post(&sc->hs_drain_sema);
789
	}
789
	}
790
}
790
}

static void
hv_storvsc_rescan_target(struct storvsc_softc *sc)
{
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	pathid = cam_sim_path(sc->hs_sim);
	targetid = CAM_TARGET_WILDCARD;

	/*
	 * Allocate a CCB and schedule a rescan.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		printf("unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		printf("unable to create path for rescan, pathid: %d, "
		    "targetid: %d\n", pathid, targetid);
		xpt_free_ccb(ccb);
		return;
	}

	if (targetid == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_TGT;

	xpt_rescan(ccb);
}

static void
hv_storvsc_on_channel_callback(void *context)
{
	int ret = 0;
	hv_vmbus_channel *channel = (hv_vmbus_channel *)context;
	struct hv_device *device = NULL;
	struct storvsc_softc *sc;
	uint32_t bytes_recvd;
	uint64_t request_id;
	uint8_t packet[roundup2(sizeof(struct vstor_packet), 8)];
	struct hv_storvsc_request *request;
	struct vstor_packet *vstor_packet;

	if (channel->primary_channel != NULL) {
		device = channel->primary_channel->device;
	} else {
		device = channel->device;
	}

	KASSERT(device, ("device is NULL"));

	sc = get_stor_device(device, FALSE);
	if (sc == NULL) {
		printf("Storvsc_error: get stor device failed.\n");
		return;
	}

	ret = hv_vmbus_channel_recv_packet(
			channel,
			packet,
			roundup2(VSTOR_PKT_SIZE, 8),
			&bytes_recvd,
			&request_id);
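
	/*
	 * request_id echoes the transaction id we passed to
	 * hv_vmbus_channel_send_packet(), which is the request pointer
	 * itself, so it can be cast straight back to the originating
	 * hv_storvsc_request.
	 */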
	while ((ret == 0) && (bytes_recvd > 0)) {
		request = (struct hv_storvsc_request *)(uintptr_t)request_id;

		if ((request == &sc->hs_init_req) ||
			(request == &sc->hs_reset_req)) {
			memcpy(&request->vstor_packet, packet,
				   sizeof(struct vstor_packet));
			sema_post(&request->synch_sema);
		} else {
			vstor_packet = (struct vstor_packet *)packet;
			switch (vstor_packet->operation) {
			case VSTOR_OPERATION_COMPLETEIO:
				if (request == NULL)
					panic("VMBUS: storvsc received a "
					    "packet with NULL request id in "
					    "COMPLETEIO operation.");

				hv_storvsc_on_iocompletion(sc,
							vstor_packet, request);
				break;
			case VSTOR_OPERATION_REMOVEDEVICE:
				printf("VMBUS: storvsc operation %d not "
				    "implemented.\n", vstor_packet->operation);
				/* TODO: implement */
				break;
			case VSTOR_OPERATION_ENUMERATE_BUS:
				hv_storvsc_rescan_target(sc);
				break;
			default:
				break;
			}
		}
		ret = hv_vmbus_channel_recv_packet(
				channel,
				packet,
				roundup2(VSTOR_PKT_SIZE, 8),
				&bytes_recvd,
				&request_id);
	}
}

/**
 * @brief StorVSC probe function
 *
 * Device probe function.  Returns 0 if the input device is a StorVSC
 * device.  Otherwise, ENXIO is returned.  If the input device is
 * for a BlkVSC (paravirtual IDE) device and this support is disabled in
 * favor of the emulated ATA/IDE device, return ENXIO.
 *
 * @param dev  a device
 * @returns 0 on success, ENXIO if not a matching StorVSC device
 */
static int
storvsc_probe(device_t dev)
{
	int ata_disk_enable = 0;
	int ret = ENXIO;

	if (hv_vmbus_protocal_version == HV_VMBUS_VERSION_WS2008 ||
	    hv_vmbus_protocal_version == HV_VMBUS_VERSION_WIN7) {
		sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
		vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
		storvsc_current_major = STORVSC_WIN7_MAJOR;
		storvsc_current_minor = STORVSC_WIN7_MINOR;
	} else {
		sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
		vmscsi_size_delta = 0;
		storvsc_current_major = STORVSC_WIN8_MAJOR;
		storvsc_current_minor = STORVSC_WIN8_MINOR;
	}

	switch (storvsc_get_storage_type(dev)) {
	case DRIVER_BLKVSC:
		if (bootverbose)
			device_printf(dev, "DRIVER_BLKVSC-Emulated ATA/IDE probe\n");
		if (!getenv_int("hw.ata.disk_enable", &ata_disk_enable)) {
			if (bootverbose)
				device_printf(dev,
					"Enlightened ATA/IDE detected\n");
			ret = BUS_PROBE_DEFAULT;
		} else if (bootverbose)
			device_printf(dev, "Emulated ATA/IDE set (hw.ata.disk_enable set)\n");
		break;
	case DRIVER_STORVSC:
		if (bootverbose)
			device_printf(dev, "Enlightened SCSI device detected\n");
		ret = BUS_PROBE_DEFAULT;
		break;
	default:
		ret = ENXIO;
	}
	return (ret);
}
954
954
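/*
 * Example (illustrative; hw.ata.disk_enable is the tunable tested above):
 * the BlkVSC case only claims the device when that tunable is absent, so
 * an administrator who prefers the emulated ATA/IDE path over the
 * enlightened one would set, in /boot/loader.conf:
 *
 *	hw.ata.disk_enable=1
 *
 * which makes getenv_int() succeed, leaves ret at ENXIO, and lets the
 * emulated device attach instead.
 */
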
/**
 * @brief StorVSC attach function
 *
 * Function responsible for allocating per-device structures,
 * setting up CAM interfaces and scanning for available LUNs to
 * be used for SCSI device peripherals.
 *
 * @param dev a device
 * @returns 0 on success or an error on failure
 */
static int
storvsc_attach(device_t dev)
{
	struct hv_device *hv_dev = vmbus_get_devctx(dev);
	enum hv_storage_type stor_type;
	struct storvsc_softc *sc;
	struct cam_devq *devq;
	int ret, i, j;
	struct hv_storvsc_request *reqp;
	struct root_hold_token *root_mount_token = NULL;
	struct hv_sgl_node *sgl_node = NULL;
	void *tmp_buff = NULL;

	/*
	 * We need to serialize storvsc attach calls.
	 */
	root_mount_token = root_mount_hold("storvsc");

	sc = device_get_softc(dev);
	if (sc == NULL) {
		ret = ENOMEM;
		goto cleanup;
	}

	stor_type = storvsc_get_storage_type(dev);

	if (stor_type == DRIVER_UNKNOWN) {
		ret = ENODEV;
		goto cleanup;
	}

	bzero(sc, sizeof(struct storvsc_softc));

	/* fill in driver specific properties */
	sc->hs_drv_props = &g_drv_props_table[stor_type];

	/* fill in device specific properties */
	sc->hs_unit	= device_get_unit(dev);
	sc->hs_dev	= hv_dev;
	device_set_desc(dev, g_drv_props_table[stor_type].drv_desc);

	LIST_INIT(&sc->hs_free_list);
	mtx_init(&sc->hs_lock, "hvslck", NULL, MTX_DEF);

	for (i = 0; i < sc->hs_drv_props->drv_max_ios_per_target; ++i) {
		reqp = malloc(sizeof(struct hv_storvsc_request),
				 M_DEVBUF, M_WAITOK|M_ZERO);
		reqp->softc = sc;

		LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
	}

	/* create sg-list page pool */
	if (FALSE == g_hv_sgl_page_pool.is_init) {
		g_hv_sgl_page_pool.is_init = TRUE;
		LIST_INIT(&g_hv_sgl_page_pool.in_use_sgl_list);
		LIST_INIT(&g_hv_sgl_page_pool.free_sgl_list);

		/*
		 * Pre-create SG lists: each SG list has
		 * HV_MAX_MULTIPAGE_BUFFER_COUNT segments, and each
		 * segment has a one-page buffer.
		 */
		for (i = 0; i < STORVSC_MAX_IO_REQUESTS; i++) {
			sgl_node = malloc(sizeof(struct hv_sgl_node),
			    M_DEVBUF, M_WAITOK|M_ZERO);

			sgl_node->sgl_data =
			    sglist_alloc(HV_MAX_MULTIPAGE_BUFFER_COUNT,
			    M_WAITOK|M_ZERO);

			for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++) {
				tmp_buff = malloc(PAGE_SIZE,
				    M_DEVBUF, M_WAITOK|M_ZERO);

				sgl_node->sgl_data->sg_segs[j].ss_paddr =
				    (vm_paddr_t)tmp_buff;
			}

			LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list,
			    sgl_node, link);
		}
	}

	sc->hs_destroy = FALSE;
	sc->hs_drain_notify = FALSE;
	sc->hs_open_multi_channel = FALSE;
	sema_init(&sc->hs_drain_sema, 0, "Store Drain Sema");

	ret = hv_storvsc_connect_vsp(hv_dev);
	if (ret != 0) {
		goto cleanup;
	}

	/*
	 * Create the device queue.
	 * Hyper-V maps each target to one SCSI HBA.
	 */
	devq = cam_simq_alloc(sc->hs_drv_props->drv_max_ios_per_target);
	if (devq == NULL) {
		device_printf(dev, "Failed to alloc device queue\n");
		ret = ENOMEM;
		goto cleanup;
	}

	sc->hs_sim = cam_sim_alloc(storvsc_action,
				storvsc_poll,
				sc->hs_drv_props->drv_name,
				sc,
				sc->hs_unit,
				&sc->hs_lock, 1,
				sc->hs_drv_props->drv_max_ios_per_target,
				devq);

	if (sc->hs_sim == NULL) {
		device_printf(dev, "Failed to alloc sim\n");
		cam_simq_free(devq);
		ret = ENOMEM;
		goto cleanup;
	}

	mtx_lock(&sc->hs_lock);
	/* bus_id is set to 0, need to get it from VMBUS channel query? */
	if (xpt_bus_register(sc->hs_sim, dev, 0) != CAM_SUCCESS) {
		cam_sim_free(sc->hs_sim, /*free_devq*/TRUE);
		mtx_unlock(&sc->hs_lock);
		device_printf(dev, "Unable to register SCSI bus\n");
		ret = ENXIO;
		goto cleanup;
	}

	if (xpt_create_path(&sc->hs_path, /*periph*/NULL,
		 cam_sim_path(sc->hs_sim),
		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sc->hs_sim));
		cam_sim_free(sc->hs_sim, /*free_devq*/TRUE);
		mtx_unlock(&sc->hs_lock);
		device_printf(dev, "Unable to create path\n");
		ret = ENXIO;
		goto cleanup;
	}

	mtx_unlock(&sc->hs_lock);

	root_mount_rel(root_mount_token);
	return (0);


cleanup:
	root_mount_rel(root_mount_token);
	while (!LIST_EMPTY(&sc->hs_free_list)) {
		reqp = LIST_FIRST(&sc->hs_free_list);
		LIST_REMOVE(reqp, link);
		free(reqp, M_DEVBUF);
	}

	while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
		sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
		LIST_REMOVE(sgl_node, link);
		for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++) {
			if (NULL !=
			    (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) {
				free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF);
			}
		}
		sglist_free(sgl_node->sgl_data);
		free(sgl_node, M_DEVBUF);
	}

	return (ret);
}

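/*
 * Sizing note, derived from the allocation loops above (no constants
 * beyond those already used): the sg-list page pool is global, built on
 * the first attach only, and holds STORVSC_MAX_IO_REQUESTS lists of
 * HV_MAX_MULTIPAGE_BUFFER_COUNT one-page buffers each, i.e.
 *
 *	STORVSC_MAX_IO_REQUESTS * HV_MAX_MULTIPAGE_BUFFER_COUNT * PAGE_SIZE
 *
 * bytes, all allocated up front with M_WAITOK.
 */
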
/**
 * @brief StorVSC device detach function
 *
 * This function is responsible for safely detaching a
 * StorVSC device.  This includes waiting for inbound responses
 * to complete and freeing associated per-device structures.
 *
 * @param dev a device
 * @returns 0 on success
 */
static int
storvsc_detach(device_t dev)
{
	struct storvsc_softc *sc = device_get_softc(dev);
	struct hv_storvsc_request *reqp = NULL;
	struct hv_device *hv_device = vmbus_get_devctx(dev);
	struct hv_sgl_node *sgl_node = NULL;
	int j = 0;

	mtx_lock(&hv_device->channel->inbound_lock);
	sc->hs_destroy = TRUE;
	mtx_unlock(&hv_device->channel->inbound_lock);

	/*
	 * At this point, all outbound traffic should be disabled. We
	 * only allow inbound traffic (responses) to proceed so that
	 * outstanding requests can be completed.
	 */

	sc->hs_drain_notify = TRUE;
	sema_wait(&sc->hs_drain_sema);
	sc->hs_drain_notify = FALSE;

	/*
	 * Since we have already drained, we don't need to busy wait.
	 * The call to close the channel will reset the callback
	 * under the protection of the incoming channel lock.
	 */

	hv_vmbus_channel_close(hv_device->channel);

	mtx_lock(&sc->hs_lock);
	while (!LIST_EMPTY(&sc->hs_free_list)) {
		reqp = LIST_FIRST(&sc->hs_free_list);
		LIST_REMOVE(reqp, link);

		free(reqp, M_DEVBUF);
	}
	mtx_unlock(&sc->hs_lock);

	while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
		sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
		LIST_REMOVE(sgl_node, link);
		for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++) {
			if (NULL !=
			    (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) {
				free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF);
			}
		}
		sglist_free(sgl_node->sgl_data);
		free(sgl_node, M_DEVBUF);
	}

	return (0);
}

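/*
 * Sketch of the drain handshake used above.  The signalling side is
 * assumed from the use of hs_drain_sema here; it lives in the completion
 * path, which is outside this hunk:
 *
 *	detach side:                    completion side (assumed):
 *	  hs_drain_notify = TRUE;         if (sc->hs_drain_notify &&
 *	  sema_wait(&hs_drain_sema);          no requests outstanding)
 *	  hs_drain_notify = FALSE;              sema_post(&hs_drain_sema);
 *
 * so detach blocks until all in-flight requests have completed and only
 * then closes the channel.
 */
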
#if HVS_TIMEOUT_TEST
/**
 * @brief unit test for timed out operations
 *
 * This function provides unit testing capability to simulate
 * timed out operations.  Recompilation with HVS_TIMEOUT_TEST=1
 * is required.
 *
 * @param reqp pointer to a request structure
 * @param opcode SCSI operation being performed
 * @param wait if 1, wait for I/O to complete
 */
static void
storvsc_timeout_test(struct hv_storvsc_request *reqp,
		uint8_t opcode, int wait)
{
	int ret;
	union ccb *ccb = reqp->ccb;
	struct storvsc_softc *sc = reqp->softc;

	if (reqp->vstor_packet.vm_srb.cdb[0] != opcode) {
		return;
	}

	if (wait) {
		mtx_lock(&reqp->event.mtx);
	}
	ret = hv_storvsc_io_request(sc->hs_dev, reqp);
	if (ret != 0) {
		if (wait) {
			mtx_unlock(&reqp->event.mtx);
		}
		printf("%s: io_request failed with %d.\n",
				__func__, ret);
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		mtx_lock(&sc->hs_lock);
		storvsc_free_request(sc, reqp);
		xpt_done(ccb);
		mtx_unlock(&sc->hs_lock);
		return;
	}

	if (wait) {
		xpt_print(ccb->ccb_h.path,
				"%u: %s: waiting for IO return.\n",
				ticks, __func__);
		ret = cv_timedwait(&reqp->event.cv, &reqp->event.mtx, 60*hz);
		mtx_unlock(&reqp->event.mtx);
		xpt_print(ccb->ccb_h.path, "%u: %s: %s.\n",
				ticks, __func__, (ret == 0)?
				"IO return detected" :
				"IO return not detected");
		/*
		 * Now both the timer handler and io done are running
		 * simultaneously. We want to confirm that io done always
		 * finishes after the timer handler exits, so the reqp used
		 * by the timer handler is not freed or stale. Do a busy
		 * loop for another 1/10 second to make sure io done does
		 * wait for the timer handler to complete.
		 */
		DELAY(100*1000);
		mtx_lock(&sc->hs_lock);
		xpt_print(ccb->ccb_h.path,
				"%u: %s: finishing, queue frozen %d, "
				"ccb status 0x%x scsi_status 0x%x.\n",
				ticks, __func__, sc->hs_frozen,
				ccb->ccb_h.status,
				ccb->csio.scsi_status);
		mtx_unlock(&sc->hs_lock);
	}
}
#endif /* HVS_TIMEOUT_TEST */

/**
 * @brief timeout handler for requests
 *
 * This function is called as a result of a callout expiring.
 *
 * @param arg pointer to a request
 */
static void
storvsc_timeout(void *arg)
{
	struct hv_storvsc_request *reqp = arg;
	struct storvsc_softc *sc = reqp->softc;
	union ccb *ccb = reqp->ccb;

	if (reqp->retries == 0) {
		mtx_lock(&sc->hs_lock);
		xpt_print(ccb->ccb_h.path,
		    "%u: IO timed out (req=0x%p), wait for another %u secs.\n",
		    ticks, reqp, ccb->ccb_h.timeout / 1000);
		cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL);
		mtx_unlock(&sc->hs_lock);

		reqp->retries++;
		callout_reset_sbt(&reqp->callout, SBT_1MS * ccb->ccb_h.timeout,
		    0, storvsc_timeout, reqp, 0);
#if HVS_TIMEOUT_TEST
		storvsc_timeout_test(reqp, SEND_DIAGNOSTIC, 0);
#endif
		return;
	}

	mtx_lock(&sc->hs_lock);
	xpt_print(ccb->ccb_h.path,
		"%u: IO (reqp = 0x%p) did not return for %u seconds, %s.\n",
		ticks, reqp, ccb->ccb_h.timeout * (reqp->retries+1) / 1000,
		(sc->hs_frozen == 0)?
		"freezing the queue" : "the queue is already frozen");
	if (sc->hs_frozen == 0) {
		sc->hs_frozen = 1;
		xpt_freeze_simq(xpt_path_sim(ccb->ccb_h.path), 1);
	}
	mtx_unlock(&sc->hs_lock);

#if HVS_TIMEOUT_TEST
	storvsc_timeout_test(reqp, MODE_SELECT_10, 1);
#endif
}

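/*
 * Timeline implied by the handler above, for a CCB timeout of T ms: the
 * callout fires at t = T with retries == 0, logs the stall and re-arms
 * itself once; if the request is still outstanding at t = 2T, the second
 * firing freezes the SIM queue (sc->hs_frozen = 1) rather than aborting
 * the request - the I/O itself is never cancelled here.
 */
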
/**
 * @brief StorVSC device poll function
 *
 * This function is responsible for servicing requests when
 * interrupts are disabled (i.e. when we are dumping core).
 *
 * @param sim a pointer to a CAM SCSI interface module
 */
static void
storvsc_poll(struct cam_sim *sim)
{
	struct storvsc_softc *sc = cam_sim_softc(sim);

	mtx_assert(&sc->hs_lock, MA_OWNED);
	mtx_unlock(&sc->hs_lock);
	hv_storvsc_on_channel_callback(sc->hs_dev->channel);
	mtx_lock(&sc->hs_lock);
}

/**
 * @brief StorVSC device action function
 *
 * This function is responsible for handling SCSI operations which
 * are passed from the CAM layer.  The requests are in the form of
 * CAM control blocks which indicate the action being performed.
 * Not all actions require converting the request to a VSCSI protocol
 * message - these actions can be responded to by this driver.
 * Requests which are destined for a backend storage device are converted
 * to a VSCSI protocol message and sent on the channel connection associated
 * with this device.
 *
 * @param sim pointer to a CAM SCSI interface module
 * @param ccb pointer to a CAM control block
 */
static void
storvsc_action(struct cam_sim *sim, union ccb *ccb)
{
	struct storvsc_softc *sc = cam_sim_softc(sim);
	int res;

	mtx_assert(&sc->hs_lock, MA_OWNED);
	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ: {
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_TAG_ABLE|PI_SDTR_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = STORVSC_MAX_TARGETS;
		cpi->max_lun = sc->hs_drv_props->drv_max_luns_per_target;
		cpi->initiator_id = cpi->max_target;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 300000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC2;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, sc->hs_drv_props->drv_name, HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	}
	case XPT_GET_TRAN_SETTINGS: {
		struct  ccb_trans_settings *cts = &ccb->cts;

		cts->transport = XPORT_SAS;
		cts->transport_version = 0;
		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_SPC2;

		/* enable tag queuing and disconnected mode */
		cts->proto_specific.valid = CTS_SCSI_VALID_TQ;
		cts->proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
		cts->proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
		cts->xport_specific.valid = CTS_SPI_VALID_DISC;
		cts->xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	}
	case XPT_SET_TRAN_SETTINGS: {
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	}
	case XPT_CALC_GEOMETRY: {
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		return;
	}
	case XPT_RESET_BUS:
	case XPT_RESET_DEV: {
#if HVS_HOST_RESET
		if ((res = hv_storvsc_host_reset(sc->hs_dev)) != 0) {
			xpt_print(ccb->ccb_h.path,
				"hv_storvsc_host_reset failed with %d\n", res);
			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
			xpt_done(ccb);
			return;
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
#else
		xpt_print(ccb->ccb_h.path,
				  "%s reset not supported.\n",
				  (ccb->ccb_h.func_code == XPT_RESET_BUS)?
				  "bus" : "dev");
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
#endif	/* HVS_HOST_RESET */
	}
	case XPT_SCSI_IO:
	case XPT_IMMED_NOTIFY: {
		struct hv_storvsc_request *reqp = NULL;

		if (ccb->csio.cdb_len == 0) {
			panic("cdb_len is 0\n");
		}

		if (LIST_EMPTY(&sc->hs_free_list)) {
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			if (sc->hs_frozen == 0) {
				sc->hs_frozen = 1;
				xpt_freeze_simq(sim, /* count*/1);
			}
			xpt_done(ccb);
			return;
		}

		reqp = LIST_FIRST(&sc->hs_free_list);
		LIST_REMOVE(reqp, link);

		bzero(reqp, sizeof(struct hv_storvsc_request));
		reqp->softc = sc;

		ccb->ccb_h.status |= CAM_SIM_QUEUED;
		if ((res = create_storvsc_request(ccb, reqp)) != 0) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
			callout_init(&reqp->callout, 1);
			callout_reset_sbt(&reqp->callout,
			    SBT_1MS * ccb->ccb_h.timeout, 0,
			    storvsc_timeout, reqp, 0);
#if HVS_TIMEOUT_TEST
			cv_init(&reqp->event.cv, "storvsc timeout cv");
			mtx_init(&reqp->event.mtx, "storvsc timeout mutex",
					NULL, MTX_DEF);
			switch (reqp->vstor_packet.vm_srb.cdb[0]) {
				case MODE_SELECT_10:
				case SEND_DIAGNOSTIC:
					/* To have the timer send the request. */
					return;
				default:
					break;
			}
#endif /* HVS_TIMEOUT_TEST */
		}

		if ((res = hv_storvsc_io_request(sc->hs_dev, reqp)) != 0) {
			xpt_print(ccb->ccb_h.path,
				"hv_storvsc_io_request failed with %d\n", res);
			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
			storvsc_free_request(sc, reqp);
			xpt_done(ccb);
			return;
		}
		return;
	}

	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}
}

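/*
 * Note on the XPT_SCSI_IO path above: when the per-target free list
 * empties, the CCB is completed with CAM_REQUEUE_REQ and the SIM queue
 * is frozen.  Releasing the queue when a request completes (clearing
 * hs_frozen) is assumed to happen in the completion path, which is
 * outside this hunk.
 */
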
/**
 * @brief destroy bounce buffer
 *
 * This function is responsible for destroying a Scatter/Gather list
 * created by storvsc_create_bounce_buffer().
 *
 * @param sgl the Scatter/Gather list to be destroyed
 *
 */
static void
storvsc_destroy_bounce_buffer(struct sglist *sgl)
{
	struct hv_sgl_node *sgl_node = NULL;

	sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.in_use_sgl_list);
	if (NULL == sgl_node) {
		printf("storvsc error: not enough in use sgl\n");
		return;
	}
	LIST_REMOVE(sgl_node, link);
	sgl_node->sgl_data = sgl;
	LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list, sgl_node, link);
}

/**
 * @brief create bounce buffer
 *
 * This function is responsible for creating a Scatter/Gather list,
 * which holds several page-sized, page-aligned buffers.
 *
 * @param seg_count SG-list segment count
 * @param write if WRITE_TYPE, set the SG list's used size per segment
 * to 0; otherwise set the used size to the page size.
 *
 * return NULL if creation failed
 */
static struct sglist *
storvsc_create_bounce_buffer(uint16_t seg_count, int write)
{
	int i = 0;
	struct sglist *bounce_sgl = NULL;
	unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
	struct hv_sgl_node *sgl_node = NULL;

	/* get struct sglist from free_sgl_list */
	sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
	if (NULL == sgl_node) {
		printf("storvsc error: not enough free sgl\n");
		return NULL;
	}
	LIST_REMOVE(sgl_node, link);
	bounce_sgl = sgl_node->sgl_data;
	LIST_INSERT_HEAD(&g_hv_sgl_page_pool.in_use_sgl_list, sgl_node, link);

	bounce_sgl->sg_maxseg = seg_count;

	if (write == WRITE_TYPE)
		bounce_sgl->sg_nseg = 0;
	else
		bounce_sgl->sg_nseg = seg_count;

	for (i = 0; i < seg_count; i++)
		bounce_sgl->sg_segs[i].ss_len = buf_len;

	return bounce_sgl;
}

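/*
 * Sketch of the pool discipline implied by the two functions above:
 * storvsc_create_bounce_buffer() moves a node from free_sgl_list to
 * in_use_sgl_list and hands out its pre-allocated sglist, and
 * storvsc_destroy_bounce_buffer() moves a node back the other way:
 *
 *	sgl = storvsc_create_bounce_buffer(nseg, data_in);
 *	... issue the I/O and wait for completion ...
 *	storvsc_destroy_bounce_buffer(sgl);
 *
 * Both lists are treated as LIFOs and the pairing is positional, not
 * keyed by sgl: destroy recycles whichever node heads in_use_sgl_list.
 */
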
/**
 * @brief copy data from an SG list to a bounce buffer
 *
 * This function is responsible for copying data from one SG list's
 * segments to another SG list that is used as a bounce buffer.
 *
 * @param bounce_sgl - the destination SG list
 * @param orig_sgl - the segments of the source SG list
 * @param orig_sgl_count - the count of segments
 * @param seg_bits - bitmask indicating which segments need the bounce
 *  buffer; a set bit means needed
 *
 */
static void
storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl,
			       bus_dma_segment_t *orig_sgl,
			       unsigned int orig_sgl_count,
			       uint64_t seg_bits)
{
	int src_sgl_idx = 0;

	for (src_sgl_idx = 0; src_sgl_idx < orig_sgl_count; src_sgl_idx++) {
		if (seg_bits & (1 << src_sgl_idx)) {
			memcpy((void*)bounce_sgl->sg_segs[src_sgl_idx].ss_paddr,
			    (void*)orig_sgl[src_sgl_idx].ds_addr,
			    orig_sgl[src_sgl_idx].ds_len);

			bounce_sgl->sg_segs[src_sgl_idx].ss_len =
			    orig_sgl[src_sgl_idx].ds_len;
		}
	}
}

/**
 * @brief copy data from an SG list used as a bounce buffer to another SG list
 *
 * This function is responsible for copying data from one SG list with a
 * bounce buffer to another SG list's segments.
 *
 * @param dest_sgl - the destination SG list's segments
 * @param dest_sgl_count - the count of the destination SG list's segments
 * @param src_sgl - the source SG list
 * @param seg_bits - indicates which segments of the source SG list used
 *  the bounce buffer
 *
 */
void
storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl,
				    unsigned int dest_sgl_count,
				    struct sglist* src_sgl,
				    uint64_t seg_bits)
{
	int sgl_idx = 0;

	for (sgl_idx = 0; sgl_idx < dest_sgl_count; sgl_idx++) {
		if (seg_bits & (1 << sgl_idx)) {
			memcpy((void*)(dest_sgl[sgl_idx].ds_addr),
			    (void*)(src_sgl->sg_segs[sgl_idx].ss_paddr),
			    src_sgl->sg_segs[sgl_idx].ss_len);
		}
	}
}

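/*
 * How the two copy helpers pair up (consistent with
 * create_storvsc_request() below; the read-side call happens on I/O
 * completion, outside this hunk):
 *
 *	write:	storvsc_copy_sgl_to_bounce_buf(bounce, orig, n, bits);
 *		... submit to the host ...
 *	read:	... host fills the bounce pages ...
 *		storvsc_copy_from_bounce_buf_to_sgl(orig, n, bounce, bits);
 *
 * Only segments whose bit is set in seg_bits are copied; aligned
 * segments are mapped to the host directly and never touch the bounce
 * pages.
 */
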
/**
 * @brief check whether an SG list needs a bounce buffer
 *
 * This function is responsible for checking whether a bounce buffer is
 * needed for an SG list.
 *
 * @param sgl - the SG list's segments
 * @param sg_count - the count of the SG list's segments
 * @param bits - bitmask of the segments that need a bounce buffer
 *
 * return -1 if the SG list does not need a bounce buffer
 */
static int
storvsc_check_bounce_buffer_sgl(bus_dma_segment_t *sgl,
				unsigned int sg_count,
				uint64_t *bits)
{
	int i = 0;
	int offset = 0;
	uint64_t phys_addr = 0;
	uint64_t tmp_bits = 0;
	boolean_t found_hole = FALSE;
	boolean_t pre_aligned = TRUE;

	if (sg_count < 2) {
		return -1;
	}

	*bits = 0;

	phys_addr = vtophys(sgl[0].ds_addr);
	offset = phys_addr - trunc_page(phys_addr);

	if (offset != 0) {
		pre_aligned = FALSE;
		tmp_bits |= 1;
	}

	for (i = 1; i < sg_count; i++) {
		phys_addr = vtophys(sgl[i].ds_addr);
		offset = phys_addr - trunc_page(phys_addr);

		if (offset == 0) {
			if (FALSE == pre_aligned) {
				/*
				 * This segment is aligned; if the previous
				 * one is not aligned, we have found a hole.
				 */
				found_hole = TRUE;
			}
			pre_aligned = TRUE;
		} else {
			tmp_bits |= 1 << i;
			if (!pre_aligned) {
				if (phys_addr != vtophys(sgl[i-1].ds_addr +
				    sgl[i-1].ds_len)) {
					/*
					 * Check whether this segment connects
					 * to the previous one; if not, we
					 * have found a hole.
					 */
					found_hole = TRUE;
				}
			} else {
				found_hole = TRUE;
			}
			pre_aligned = FALSE;
		}
	}

	if (!found_hole) {
		return (-1);
	} else {
		*bits = tmp_bits;
		return 0;
	}
}

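/*
 * Worked example of the check above (hypothetical addresses,
 * PAGE_SIZE = 4096).  Three segments with physical start addresses:
 *
 *	seg 0: 0x10000, page aligned             -> bit 0 clear
 *	seg 1: 0x20800, starts mid-page          -> bit 1 set, hole found
 *	seg 2: 0x22000, aligned after unaligned  -> bit 2 clear, hole found
 *
 * The function returns 0 with *bits = 0x2, so only segment 1 is bounced.
 * By contrast, segments that all start mid-page but are physically
 * contiguous set bits without finding a hole, so the function returns -1
 * and no bounce buffer is used.
 */
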
/**
 * @brief Fill in a request structure based on a CAM control block
 *
 * Fills in a request structure based on the contents of a CAM control
 * block.  The request structure holds the payload information for a
 * VSCSI protocol request.
 *
 * @param ccb pointer to a CAM control block
 * @param reqp pointer to a request structure
 */
static int
create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp)
{
	struct ccb_scsiio *csio = &ccb->csio;
	uint64_t phys_addr;
	uint32_t bytes_to_copy = 0;
	uint32_t pfn_num = 0;
	uint32_t pfn;
	uint64_t not_aligned_seg_bits = 0;

	/* refer to struct vmscsi_req for the meanings of these two fields */
	reqp->vstor_packet.u.vm_srb.port =
		cam_sim_unit(xpt_path_sim(ccb->ccb_h.path));
	reqp->vstor_packet.u.vm_srb.path_id =
		cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));

	reqp->vstor_packet.u.vm_srb.target_id = ccb->ccb_h.target_id;
	reqp->vstor_packet.u.vm_srb.lun = ccb->ccb_h.target_lun;

	reqp->vstor_packet.u.vm_srb.cdb_len = csio->cdb_len;
	if (ccb->ccb_h.flags & CAM_CDB_POINTER) {
		memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_ptr,
			csio->cdb_len);
	} else {
		memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_bytes,
			csio->cdb_len);
	}

	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_OUT:
		reqp->vstor_packet.u.vm_srb.data_in = WRITE_TYPE;
		break;
	case CAM_DIR_IN:
		reqp->vstor_packet.u.vm_srb.data_in = READ_TYPE;
		break;
	case CAM_DIR_NONE:
		reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE;
		break;
	default:
		reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE;
		break;
	}

	reqp->sense_data     = &csio->sense_data;
	reqp->sense_info_len = csio->sense_len;

	reqp->ccb = ccb;

	if (0 == csio->dxfer_len) {
		return (0);
	}

	reqp->data_buf.length = csio->dxfer_len;

	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_VADDR:
	{
		bytes_to_copy = csio->dxfer_len;
		phys_addr = vtophys(csio->data_ptr);
		reqp->data_buf.offset = phys_addr & PAGE_MASK;

		while (bytes_to_copy != 0) {
			int bytes, page_offset;
			phys_addr =
			    vtophys(&csio->data_ptr[reqp->data_buf.length -
			    bytes_to_copy]);
			pfn = phys_addr >> PAGE_SHIFT;
			reqp->data_buf.pfn_array[pfn_num] = pfn;
			page_offset = phys_addr & PAGE_MASK;

			bytes = min(PAGE_SIZE - page_offset, bytes_to_copy);

			bytes_to_copy -= bytes;
			pfn_num++;
		}
		break;
	}

1808
	{
1808
	{
1809
		int i = 0;
1809
		int i = 0;
1810
		int offset = 0;
1810
		int offset = 0;
1811
		int ret;
1811
		int ret;
1812
1812
1813
		bus_dma_segment_t *storvsc_sglist =
1813
		bus_dma_segment_t *storvsc_sglist =
1814
		    (bus_dma_segment_t *)ccb->csio.data_ptr;
1814
		    (bus_dma_segment_t *)ccb->csio.data_ptr;
1815
		u_int16_t storvsc_sg_count = ccb->csio.sglist_cnt;
1815
		u_int16_t storvsc_sg_count = ccb->csio.sglist_cnt;
1816
1816
1817
		printf("Storvsc: get SG I/O operation, %d\n",
1817
		printf("Storvsc: get SG I/O operation, %d\n",
1818
		    reqp->vstor_packet.u.vm_srb.data_in);
1818
		    reqp->vstor_packet.u.vm_srb.data_in);
1819
1819
1820
		if (storvsc_sg_count > HV_MAX_MULTIPAGE_BUFFER_COUNT){
1820
		if (storvsc_sg_count > HV_MAX_MULTIPAGE_BUFFER_COUNT){
1821
			printf("Storvsc: %d segments is too much, "
1821
			printf("Storvsc: %d segments is too much, "
1822
			    "only support %d segments\n",
1822
			    "only support %d segments\n",
1823
			    storvsc_sg_count, HV_MAX_MULTIPAGE_BUFFER_COUNT);
1823
			    storvsc_sg_count, HV_MAX_MULTIPAGE_BUFFER_COUNT);
1824
			return (EINVAL);
1824
			return (EINVAL);
1825
		}
1825
		}
1826
1826
1827
		/*
1827
		/*
1828
		 * We create our own bounce buffer function currently. Idealy
1828
		 * We create our own bounce buffer function currently. Idealy
1829
		 * we should use BUS_DMA(9) framework. But with current BUS_DMA
1829
		 * we should use BUS_DMA(9) framework. But with current BUS_DMA
1830
		 * code there is no callback API to check the page alignment of
1830
		 * code there is no callback API to check the page alignment of
1831
		 * middle segments before busdma can decide if a bounce buffer
1831
		 * middle segments before busdma can decide if a bounce buffer
1832
		 * is needed for particular segment. There is callback,
1832
		 * is needed for particular segment. There is callback,
1833
		 * "bus_dma_filter_t *filter", but the parrameters are not
1833
		 * "bus_dma_filter_t *filter", but the parrameters are not
1834
		 * sufficient for storvsc driver.
1834
		 * sufficient for storvsc driver.
1835
		 * TODO:
1835
		 * TODO:
1836
		 *	Add page alignment check in BUS_DMA(9) callback. Once
1836
		 *	Add page alignment check in BUS_DMA(9) callback. Once
1837
		 *	this is complete, switch the following code to use
1837
		 *	this is complete, switch the following code to use
1838
		 *	BUS_DMA(9) for storvsc bounce buffer support.
1838
		 *	BUS_DMA(9) for storvsc bounce buffer support.
1839
		 */
1839
		 */
1840
		/* check if we need to create bounce buffer */
1840
		/* check if we need to create bounce buffer */
		ret = storvsc_check_bounce_buffer_sgl(storvsc_sglist,
		    storvsc_sg_count, &not_aligned_seg_bits);
		if (ret != -1) {
			reqp->bounce_sgl =
			    storvsc_create_bounce_buffer(storvsc_sg_count,
			    reqp->vstor_packet.u.vm_srb.data_in);
			if (NULL == reqp->bounce_sgl) {
				printf("Storvsc_error: "
				    "create bounce buffer failed.\n");
				return (ENOMEM);
			}

			reqp->bounce_sgl_count = storvsc_sg_count;
			reqp->not_aligned_seg_bits = not_aligned_seg_bits;

			/*
			 * If it is a write, we need to copy the original
			 * data to the bounce buffer.
			 */
			if (WRITE_TYPE == reqp->vstor_packet.u.vm_srb.data_in) {
				storvsc_copy_sgl_to_bounce_buf(
				    reqp->bounce_sgl,
				    storvsc_sglist,
				    storvsc_sg_count,
				    reqp->not_aligned_seg_bits);
			}

			/* Translate virtual addresses to physical frame numbers. */
			if (reqp->not_aligned_seg_bits & 0x1) {
				phys_addr =
				    vtophys(reqp->bounce_sgl->sg_segs[0].ss_paddr);
			} else {
				phys_addr =
				    vtophys(storvsc_sglist[0].ds_addr);
			}
			reqp->data_buf.offset = phys_addr & PAGE_MASK;

			pfn = phys_addr >> PAGE_SHIFT;
			reqp->data_buf.pfn_array[0] = pfn;

			for (i = 1; i < storvsc_sg_count; i++) {
				if (reqp->not_aligned_seg_bits & (1 << i)) {
					phys_addr =
					    vtophys(reqp->bounce_sgl->sg_segs[i].ss_paddr);
				} else {
					phys_addr =
					    vtophys(storvsc_sglist[i].ds_addr);
				}

				pfn = phys_addr >> PAGE_SHIFT;
				reqp->data_buf.pfn_array[i] = pfn;
			}
		} else {
			phys_addr = vtophys(storvsc_sglist[0].ds_addr);

			reqp->data_buf.offset = phys_addr & PAGE_MASK;

			for (i = 0; i < storvsc_sg_count; i++) {
				phys_addr = vtophys(storvsc_sglist[i].ds_addr);
				pfn = phys_addr >> PAGE_SHIFT;
				reqp->data_buf.pfn_array[i] = pfn;
			}

			/* Check whether the last segment crosses a page boundary. */
			offset = phys_addr & PAGE_MASK;
			if (offset) {
				phys_addr =
				    vtophys(storvsc_sglist[i-1].ds_addr +
				    PAGE_SIZE - offset);
				pfn = phys_addr >> PAGE_SHIFT;
				reqp->data_buf.pfn_array[i] = pfn;
			}

			reqp->bounce_sgl_count = 0;
		}
		break;
	}
	default:
		printf("Unknown flags: %d\n", ccb->ccb_h.flags);
		return (EINVAL);
	}

	return (0);
}

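To make the PFN-array bookkeeping above easier to follow, here is a minimal,
self-contained sketch (illustrative only, not part of the patch; the helper
name fill_pfn_array() is hypothetical) of how a physically contiguous buffer
is described as one page frame number per page touched, which is what the
CAM_DATA_VADDR loop computes one vtophys() call at a time:

#include <sys/param.h>	/* PAGE_SIZE, PAGE_SHIFT, PAGE_MASK, MIN */
#include <sys/types.h>

/*
 * Record one PFN per page covered by [pa, pa + len).
 * Returns the number of PFNs written, or -1 if max_pfns would be exceeded.
 */
static int
fill_pfn_array(uint64_t pa, size_t len, uint64_t *pfns, int max_pfns)
{
	int n = 0;

	while (len != 0) {
		size_t page_off = pa & PAGE_MASK;
		size_t chunk = MIN(PAGE_SIZE - page_off, len);

		if (n == max_pfns)
			return (-1);
		pfns[n++] = pa >> PAGE_SHIFT;
		pa += chunk;
		len -= chunk;
	}
	return (n);
}

The first page offset (pa & PAGE_MASK) is stored separately in
reqp->data_buf.offset, so the host side can reassemble the exact byte range.
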
/**
 * Modified from scsi_print_inquiry(), which is responsible for
 * printing the detailed information in scsi_inquiry_data.
 * Returns 1 if the data is valid, 0 otherwise.
 */
static inline int
is_scsi_valid(const struct scsi_inquiry_data *inq_data)
{
	u_int8_t type;
	char vendor[16], product[48], revision[16];

	/*
	 * Check device type and qualifier.
	 */
	if (!(SID_QUAL_IS_VENDOR_UNIQUE(inq_data) ||
	    SID_QUAL(inq_data) == SID_QUAL_LU_CONNECTED)) {
		return (0);
	}

	type = SID_TYPE(inq_data);
	switch (type) {
	case T_DIRECT:
	case T_SEQUENTIAL:
	case T_PRINTER:
	case T_PROCESSOR:
	case T_WORM:
	case T_CDROM:
	case T_SCANNER:
	case T_OPTICAL:
	case T_CHANGER:
	case T_COMM:
	case T_STORARRAY:
	case T_ENCLOSURE:
	case T_RBC:
	case T_OCRW:
	case T_OSD:
	case T_ADC:
		break;
	case T_NODEVICE:
		return (0);
	default:
		return (0);
	}

	/*
	 * Check vendor, product, and revision.
	 */
	cam_strvis(vendor, inq_data->vendor, sizeof(inq_data->vendor),
		   sizeof(vendor));
	cam_strvis(product, inq_data->product, sizeof(inq_data->product),
		   sizeof(product));
	cam_strvis(revision, inq_data->revision, sizeof(inq_data->revision),
		   sizeof(revision));
	if (strlen(vendor) == 0 ||
	    strlen(product) == 0 ||
	    strlen(revision) == 0) {
		return (0);
	}
	return (1);
}
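
A sketch of the intended call site (the real check is in storvsc_io_done()
below; csio->data_ptr is assumed to hold the INQUIRY response payload):

	/* Reject a "successful" INQUIRY whose payload fails validation. */
	if (cmd->opcode == INQUIRY &&
	    is_scsi_valid((struct scsi_inquiry_data *)csio->data_ptr) == 0)
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
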
/**
 * @brief Completion function, called before returning to CAM
 *
 * The I/O has been completed and the result needs
 * to be passed to the CAM layer.
 * Free resources related to this request.
 *
 * @param reqp pointer to a request structure
 */
static void
storvsc_io_done(struct hv_storvsc_request *reqp)
{
	union ccb *ccb = reqp->ccb;
	struct ccb_scsiio *csio = &ccb->csio;
	struct storvsc_softc *sc = reqp->softc;
	struct vmscsi_req *vm_srb = &reqp->vstor_packet.u.vm_srb;
	bus_dma_segment_t *ori_sglist = NULL;
	int ori_sg_count = 0;

	/* Destroy the bounce buffer if one was used. */
	if (reqp->bounce_sgl_count) {
		ori_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr;
		ori_sg_count = ccb->csio.sglist_cnt;

		/*
		 * If it is a READ operation, copy the data back
		 * to the original SG list.
		 */
		if (READ_TYPE == reqp->vstor_packet.u.vm_srb.data_in) {
			storvsc_copy_from_bounce_buf_to_sgl(ori_sglist,
			    ori_sg_count,
			    reqp->bounce_sgl,
			    reqp->not_aligned_seg_bits);
		}

		storvsc_destroy_bounce_buffer(reqp->bounce_sgl);
		reqp->bounce_sgl_count = 0;
	}

	if (reqp->retries > 0) {
		mtx_lock(&sc->hs_lock);
#if HVS_TIMEOUT_TEST
		xpt_print(ccb->ccb_h.path,
			"%u: IO returned after timeout, "
			"waking up timer handler if any.\n", ticks);
		mtx_lock(&reqp->event.mtx);
		cv_signal(&reqp->event.cv);
		mtx_unlock(&reqp->event.mtx);
#endif
		reqp->retries = 0;
		xpt_print(ccb->ccb_h.path,
			"%u: IO returned after timeout, "
			"stopping timer if any.\n", ticks);
		mtx_unlock(&sc->hs_lock);
	}

	/*
	 * callout_drain() will wait for the timer handler to finish
	 * if it is running, so we don't need any lock to synchronize
	 * this routine with the timer handler.
	 * Note that we need to make sure reqp is not freed while the
	 * timer handler is using, or will use, it.
	 */
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		callout_drain(&reqp->callout);
	}

	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	/*
	 * Check whether the SCSI device is valid for the INQUIRY command.
	 * Windows 10 and Windows Server 2016 send wrong information to
	 * the VM for unknown reasons; that is why the is_scsi_valid()
	 * check is here.
	 */
	const struct scsi_generic *cmd;
	cmd = (const struct scsi_generic *)((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
	    csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes);

	if (vm_srb->scsi_status == SCSI_STATUS_OK) {
		if (cmd->opcode == INQUIRY &&
		    is_scsi_valid((struct scsi_inquiry_data *)csio->data_ptr) == 0) {
			ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
			if (bootverbose) {
				mtx_lock(&sc->hs_lock);
				xpt_print(ccb->ccb_h.path,
					"storvsc uninstalled device\n");
				mtx_unlock(&sc->hs_lock);
			}
		} else {
			ccb->ccb_h.status |= CAM_REQ_CMP;
		}
	} else {
		mtx_lock(&sc->hs_lock);
		xpt_print(ccb->ccb_h.path,
			"storvsc scsi_status = %d\n",
			vm_srb->scsi_status);
		mtx_unlock(&sc->hs_lock);
		ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
	}

	ccb->csio.scsi_status = (vm_srb->scsi_status & 0xFF);
	ccb->csio.resid = ccb->csio.dxfer_len - vm_srb->transfer_len;

	if (reqp->sense_info_len != 0) {
		csio->sense_resid = csio->sense_len - reqp->sense_info_len;
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
	}

	mtx_lock(&sc->hs_lock);
	if (reqp->softc->hs_frozen == 1) {
		xpt_print(ccb->ccb_h.path,
			"%u: storvsc unfreezing softc 0x%p.\n",
			ticks, reqp->softc);
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		reqp->softc->hs_frozen = 0;
	}
	storvsc_free_request(sc, reqp);
	xpt_done(ccb);
	mtx_unlock(&sc->hs_lock);
}

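The callout_drain() comment above relies on the standard callout(9) lifetime
pattern for per-request timeouts. A condensed sketch of that pattern follows
(hypothetical names, illustrative only; it assumes the callout was armed when
the request was issued):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>

/* Hypothetical request that carries its own timeout timer. */
struct sketch_req {
	struct callout timer;
};

static void
sketch_timeout(void *arg)
{
	/* Runs in callout context if the I/O does not complete in time. */
}

static void
sketch_submit(struct sketch_req *r, int timo_ticks)
{
	callout_init(&r->timer, 1);	/* MPSAFE callout */
	callout_reset(&r->timer, timo_ticks, sketch_timeout, r);
}

static void
sketch_complete(struct sketch_req *r)
{
	/*
	 * callout_drain() blocks until a concurrently running handler
	 * finishes, so the request may be freed safely afterwards,
	 * which is the property storvsc_io_done() depends on above.
	 */
	callout_drain(&r->timer);
	/* It is now safe to free or recycle r. */
}
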
/**
 * @brief Free a request structure
 *
 * Free a request structure by returning it to the free list.
 *
 * @param sc pointer to a softc
 * @param reqp pointer to a request structure
 */
static void
storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp)
{

	LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
}

/**
 * @brief Determine the type of storage device from its GUID
 *
 * Using the type GUID, determine if this is a StorVSC (paravirtual
 * SCSI) or a BlkVSC (paravirtual IDE) device.
 *
 * @param dev a device
 * @return an enum hv_storage_type value
 */
static enum hv_storage_type
storvsc_get_storage_type(device_t dev)
{
	const char *p = vmbus_get_type(dev);

	if (!memcmp(p, &gBlkVscDeviceType, sizeof(hv_guid))) {
		return (DRIVER_BLKVSC);
	} else if (!memcmp(p, &gStorVscDeviceType, sizeof(hv_guid))) {
		return (DRIVER_STORVSC);
	}
	return (DRIVER_UNKNOWN);
}
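
A typical consumer of this helper is a bus probe routine that accepts the
device only when the type GUID is recognized. A simplified sketch
(hypothetical function name, modeled on, but not identical to, the driver's
probe logic):

static int
storvsc_probe_sketch(device_t dev)
{
	switch (storvsc_get_storage_type(dev)) {
	case DRIVER_BLKVSC:
		device_set_desc(dev, "Hyper-V IDE Storage Interface");
		return (BUS_PROBE_DEFAULT);
	case DRIVER_STORVSC:
		device_set_desc(dev, "Hyper-V SCSI Storage Interface");
		return (BUS_PROBE_DEFAULT);
	default:
		return (ENXIO);
	}
}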
