
b/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c (-11 / +46 lines)
Lines 1-2170
/*-
 * Copyright (c) 2009-2012,2016 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * StorVSC driver for Hyper-V.  This driver presents a SCSI HBA interface
 * to the Common Access Method (CAM) layer.  CAM control blocks (CCBs) are
 * converted into VSCSI protocol messages which are delivered to the parent
 * partition StorVSP driver over the Hyper-V VMBUS.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/condvar.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/bus.h>
#include <sys/mutex.h>
#include <sys/callout.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <sys/lock.h>
#include <sys/sema.h>
#include <sys/sglist.h>
#include <machine/bus.h>
#include <sys/bus_dma.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/hyperv/include/hyperv.h>
#include "hv_vstorage.h"

#define STORVSC_RINGBUFFER_SIZE		(20*PAGE_SIZE)
#define STORVSC_MAX_LUNS_PER_TARGET	(64)
#define STORVSC_MAX_IO_REQUESTS		(STORVSC_MAX_LUNS_PER_TARGET * 2)
#define BLKVSC_MAX_IDE_DISKS_PER_TARGET	(1)
#define BLKVSC_MAX_IO_REQUESTS		STORVSC_MAX_IO_REQUESTS
#define STORVSC_MAX_TARGETS		(2)

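/*
 * On-the-wire size of a vstor packet: pre-win8 hosts do not know about
 * the win8 extension fields, so their size is subtracted at run time
 * (vmscsi_size_delta, negotiated with the host below).
 */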
#define VSTOR_PKT_SIZE	(sizeof(struct vstor_packet) - vmscsi_size_delta)

#define HV_ALIGN(x, a) roundup2(x, a)

struct storvsc_softc;

struct hv_sgl_node {
	LIST_ENTRY(hv_sgl_node) link;
	struct sglist *sgl_data;
};

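/*
 * Global pool of pre-allocated scatter/gather lists, used to bounce
 * requests whose data segments are not page aligned (see bounce_sgl in
 * struct hv_storvsc_request below).
 */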
struct hv_sgl_page_pool {
	LIST_HEAD(, hv_sgl_node) in_use_sgl_list;
	LIST_HEAD(, hv_sgl_node) free_sgl_list;
	boolean_t                is_init;
} g_hv_sgl_page_pool;

#define STORVSC_MAX_SG_PAGE_CNT (STORVSC_MAX_IO_REQUESTS * HV_MAX_MULTIPAGE_BUFFER_COUNT)

enum storvsc_request_type {
	WRITE_TYPE,
	READ_TYPE,
	UNKNOWN_TYPE
};

struct hv_storvsc_request {
	LIST_ENTRY(hv_storvsc_request) link;
	struct vstor_packet	vstor_packet;
	hv_vmbus_multipage_buffer data_buf;
	void *sense_data;
	uint8_t sense_info_len;
	uint8_t retries;
	union ccb *ccb;
	struct storvsc_softc *softc;
	struct callout callout;
	struct sema synch_sema; /* Synchronize the request/response if needed */
	struct sglist *bounce_sgl;
	unsigned int bounce_sgl_count;
	uint64_t not_aligned_seg_bits;
};

struct storvsc_softc {
	struct hv_device		*hs_dev;
	LIST_HEAD(, hv_storvsc_request)	hs_free_list;
	struct mtx			hs_lock;
	struct storvsc_driver_props	*hs_drv_props;
	int 				hs_unit;
	uint32_t			hs_frozen;
	struct cam_sim			*hs_sim;
	struct cam_path 		*hs_path;
	uint32_t			hs_num_out_reqs;
	boolean_t			hs_destroy;
	boolean_t			hs_drain_notify;
	struct sema 			hs_drain_sema;
	struct hv_storvsc_request	hs_init_req;
	struct hv_storvsc_request	hs_reset_req;
};


/**
 * HyperV storvsc timeout testing cases:
 * a. IO returned after first timeout;
 * b. IO returned after second timeout and queue freeze;
 * c. IO returned while timer handler is running
 * The first can be tested by "sg_senddiag -vv /dev/daX",
 * and the second and third can be done by
 * "sg_wr_mode -v -p 08 -c 0,1a -m 0,ff /dev/daX".
 */
#define HVS_TIMEOUT_TEST 0

/*
 * Bus/adapter reset functionality on the Hyper-V host is
 * buggy and it will be disabled until
 * it can be further tested.
 */
#define HVS_HOST_RESET 0

struct storvsc_driver_props {
	char		*drv_name;
	char		*drv_desc;
	uint8_t		drv_max_luns_per_target;
	uint8_t		drv_max_ios_per_target;
	uint32_t	drv_ringbuffer_size;
};

enum hv_storage_type {
	DRIVER_BLKVSC,
	DRIVER_STORVSC,
	DRIVER_UNKNOWN
};

#define HS_MAX_ADAPTERS 10

#define HV_STORAGE_SUPPORTS_MULTI_CHANNEL 0x1

/* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
static const hv_guid gStorVscDeviceType = {
	.data = {0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
		 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f}
};

/* {32412632-86cb-44a2-9b5c-50d1417354f5} */
static const hv_guid gBlkVscDeviceType = {
	.data = {0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
		 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5}
};

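/* Indexed by enum hv_storage_type (DRIVER_BLKVSC, DRIVER_STORVSC). */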
static struct storvsc_driver_props g_drv_props_table[] = {
	{"blkvsc", "Hyper-V IDE Storage Interface",
	 BLKVSC_MAX_IDE_DISKS_PER_TARGET, BLKVSC_MAX_IO_REQUESTS,
	 STORVSC_RINGBUFFER_SIZE},
	{"storvsc", "Hyper-V SCSI Storage Interface",
	 STORVSC_MAX_LUNS_PER_TARGET, STORVSC_MAX_IO_REQUESTS,
	 STORVSC_RINGBUFFER_SIZE}
};

/*
 * Sense buffer size changed in win8; have a run-time
 * variable to track the size we should use.
 */
static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;

/*
 * The size of the vmscsi_request has changed in win8. The
 * additional size is for the newly added elements in the
 * structure. These elements are valid only when we are talking
 * to a win8 host.
 * Track the correct size we need to apply.
 */
static int vmscsi_size_delta;
/*
 * The storage protocol version is determined during the
 * initial exchange with the host.  It will indicate which
 * storage functionality is available in the host.
 */
static int vmstor_proto_version;

struct vmstor_proto {
        int proto_version;
        int sense_buffer_size;
        int vmscsi_size_delta;
};

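/*
 * Ordered newest first; hv_storvsc_channel_init() offers each entry in
 * turn and settles on the first version the host accepts.
 */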
static const struct vmstor_proto vmstor_proto_list[] = {
        {
                VMSTOR_PROTOCOL_VERSION_WIN10,
                POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
                0
        },
        {
                VMSTOR_PROTOCOL_VERSION_WIN8_1,
                POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
                0
        },
        {
                VMSTOR_PROTOCOL_VERSION_WIN8,
                POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
                0
        },
        {
                VMSTOR_PROTOCOL_VERSION_WIN7,
                PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
                sizeof(struct vmscsi_win8_extension),
        },
        {
                VMSTOR_PROTOCOL_VERSION_WIN6,
                PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
                sizeof(struct vmscsi_win8_extension),
        }
};

/* static functions */
static int storvsc_probe(device_t dev);
static int storvsc_attach(device_t dev);
static int storvsc_detach(device_t dev);
static void storvsc_poll(struct cam_sim * sim);
static void storvsc_action(struct cam_sim * sim, union ccb * ccb);
static int create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp);
static void storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp);
static enum hv_storage_type storvsc_get_storage_type(device_t dev);
static void hv_storvsc_rescan_target(struct storvsc_softc *sc);
static void hv_storvsc_on_channel_callback(void *context);
static void hv_storvsc_on_iocompletion( struct storvsc_softc *sc,
					struct vstor_packet *vstor_packet,
					struct hv_storvsc_request *request);
static int hv_storvsc_connect_vsp(struct hv_device *device);
static void storvsc_io_done(struct hv_storvsc_request *reqp);
static void storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl,
				bus_dma_segment_t *orig_sgl,
				unsigned int orig_sgl_count,
				uint64_t seg_bits);
void storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl,
				unsigned int dest_sgl_count,
				struct sglist* src_sgl,
				uint64_t seg_bits);

static device_method_t storvsc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		storvsc_probe),
	DEVMETHOD(device_attach,	storvsc_attach),
	DEVMETHOD(device_detach,	storvsc_detach),
	DEVMETHOD(device_shutdown,      bus_generic_shutdown),
	DEVMETHOD_END
};

static driver_t storvsc_driver = {
	"storvsc", storvsc_methods, sizeof(struct storvsc_softc),
};

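/* Register the driver with newbus as a child of the vmbus bus. */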
static devclass_t storvsc_devclass;
DRIVER_MODULE(storvsc, vmbus, storvsc_driver, storvsc_devclass, 0, 0);
MODULE_VERSION(storvsc, 1);
MODULE_DEPEND(storvsc, vmbus, 1, 1, 1);


/**
 * The host is capable of sending messages to us that are
 * completely unsolicited. So, we need to address the race
 * condition where we may be in the process of unloading the
 * driver when the host may send us an unsolicited message.
 * We address this issue by implementing a sequentially
 * consistent protocol:
 *
 * 1. Channel callback is invoked while holding the channel lock
 *    and an unloading driver will reset the channel callback under
 *    the protection of this channel lock.
 *
 * 2. To ensure bounded wait time for unloading a driver, we don't
 *    permit outgoing traffic once the device is marked as being
 *    destroyed.
 *
 * 3. Once the device is marked as being destroyed, we only
 *    permit incoming traffic to properly account for
 *    packets already sent out.
 */
static inline struct storvsc_softc *
get_stor_device(struct hv_device *device,
				boolean_t outbound)
{
	struct storvsc_softc *sc;

	sc = device_get_softc(device->device);

	if (outbound) {
		/*
		 * Here we permit outgoing I/O only
		 * if the device is not being destroyed.
		 */

		if (sc->hs_destroy) {
			sc = NULL;
		}
	} else {
		/*
		 * inbound case; if being destroyed
		 * only permit to account for
		 * messages already sent out.
		 */
		if (sc->hs_destroy && (sc->hs_num_out_reqs == 0)) {
			sc = NULL;
		}
	}
	return sc;
}

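/*
 * Open a newly offered sub-channel, reusing the primary channel's ring
 * buffer size and completion callback.
 */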
static void
storvsc_subchan_attach(struct hv_vmbus_channel *new_channel)
{
	struct hv_device *device;
	struct storvsc_softc *sc;
	struct vmstor_chan_props props;
	int ret = 0;

	device = new_channel->device;
	sc = get_stor_device(device, TRUE);
	if (sc == NULL)
		return;

	memset(&props, 0, sizeof(props));

	ret = hv_vmbus_channel_open(new_channel,
	    sc->hs_drv_props->drv_ringbuffer_size,
	    sc->hs_drv_props->drv_ringbuffer_size,
	    (void *)&props,
	    sizeof(struct vmstor_chan_props),
	    hv_storvsc_on_channel_callback,
	    new_channel);

	return;
}

/**
 * @brief Send multi-channel creation request to host
 *
 * @param device  a Hyper-V device pointer
 * @param max_chans  the max channels supported by vmbus
 */
static void
storvsc_send_multichannel_request(struct hv_device *dev, int max_chans)
{
	struct hv_vmbus_channel **subchan;
	struct storvsc_softc *sc;
	struct hv_storvsc_request *request;
	struct vstor_packet *vstor_packet;
	int request_channels_cnt = 0;
	int ret, i;

	/* Number of sub-channels to create: bounded by host support and CPU count. */
	request_channels_cnt = MIN(max_chans, mp_ncpus);

	sc = get_stor_device(dev, TRUE);
	if (sc == NULL) {
		printf("Storvsc_error: get sc failed while sending "
		    "multichannel request\n");
		return;
	}

	request = &sc->hs_init_req;

	/* request the host to create multi-channel */
	memset(request, 0, sizeof(struct hv_storvsc_request));

	sema_init(&request->synch_sema, 0, ("stor_synch_sema"));

	vstor_packet = &request->vstor_packet;

	vstor_packet->operation = VSTOR_OPERATION_CREATE_MULTI_CHANNELS;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
	vstor_packet->u.multi_channels_cnt = request_channels_cnt;

	ret = hv_vmbus_channel_send_packet(
	    dev->channel,
	    vstor_packet,
	    VSTOR_PKT_SIZE,
	    (uint64_t)(uintptr_t)request,
	    HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
	    HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	/* wait for 5 seconds */
	ret = sema_timedwait(&request->synch_sema, 5 * hz);
	if (ret != 0) {
		printf("Storvsc_error: create multi-channel timeout, %d\n",
		    ret);
		return;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
	    vstor_packet->status != 0) {
		printf("Storvsc_error: create multi-channel invalid operation "
		    "(%d) or status (%u)\n",
		    vstor_packet->operation, vstor_packet->status);
		return;
	}

	/* Wait for sub-channels setup to complete. */
	subchan = vmbus_get_subchan(dev->channel, request_channels_cnt);

	/* Attach the sub-channels. */
	for (i = 0; i < request_channels_cnt; ++i)
		storvsc_subchan_attach(subchan[i]);

	/* Release the sub-channels. */
	vmbus_rel_subchan(subchan, request_channels_cnt);

	if (bootverbose)
		printf("Storvsc create multi-channel success!\n");
}

/**
 * @brief initialize channel connection to parent partition
 *
 * @param dev  a Hyper-V device pointer
 * @returns  0 on success, non-zero error on failure
 */
static int
hv_storvsc_channel_init(struct hv_device *dev)
{
	int ret = 0, i;
	struct hv_storvsc_request *request;
	struct vstor_packet *vstor_packet;
	struct storvsc_softc *sc;
	uint16_t max_chans = 0;
	boolean_t support_multichannel = FALSE;

	max_chans = 0;
	support_multichannel = FALSE;

	sc = get_stor_device(dev, TRUE);
	if (sc == NULL)
		return (ENODEV);

	request = &sc->hs_init_req;
	memset(request, 0, sizeof(struct hv_storvsc_request));
	vstor_packet = &request->vstor_packet;
	request->softc = sc;

	/**
	 * Initiate the vsc/vsp initialization protocol on the open channel
	 */
	sema_init(&request->synch_sema, 0, ("stor_synch_sema"));

	vstor_packet->operation = VSTOR_OPERATION_BEGININITIALIZATION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;


	ret = hv_vmbus_channel_send_packet(
			dev->channel,
			vstor_packet,
			VSTOR_PKT_SIZE,
			(uint64_t)(uintptr_t)request,
			HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
			HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		goto cleanup;

	/* wait 5 seconds */
	ret = sema_timedwait(&request->synch_sema, 5 * hz);
	if (ret != 0)
		goto cleanup;

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
		vstor_packet->status != 0) {
		goto cleanup;
	}

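	/*
	 * Negotiate the protocol: offer each version in vmstor_proto_list
	 * (newest first) until the host reports success.
	 */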
	for (i = 0; i < nitems(vmstor_proto_list); i++) {
		/* reuse the packet for version range supported */

		memset(vstor_packet, 0, sizeof(struct vstor_packet));
		vstor_packet->operation = VSTOR_OPERATION_QUERYPROTOCOLVERSION;
		vstor_packet->flags = REQUEST_COMPLETION_FLAG;

		vstor_packet->u.version.major_minor =
			vmstor_proto_list[i].proto_version;

		/* revision is only significant for Windows guests */
		vstor_packet->u.version.revision = 0;

		ret = hv_vmbus_channel_send_packet(
			dev->channel,
			vstor_packet,
			VSTOR_PKT_SIZE,
			(uint64_t)(uintptr_t)request,
			HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
			HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

		if (ret != 0)
			goto cleanup;

		/* wait 5 seconds */
		ret = sema_timedwait(&request->synch_sema, 5 * hz);

		if (ret)
			goto cleanup;

		if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO) {
			ret = EINVAL;
			goto cleanup;
		}
		if (vstor_packet->status == 0) {
			vmstor_proto_version =
				vmstor_proto_list[i].proto_version;
			sense_buffer_size =
				vmstor_proto_list[i].sense_buffer_size;
			vmscsi_size_delta =
				vmstor_proto_list[i].vmscsi_size_delta;
			break;
		}
	}

	if (vstor_packet->status != 0) {
		ret = EINVAL;
		goto cleanup;
	}
	/**
	 * Query channel properties
	 */
	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_QUERYPROPERTIES;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = hv_vmbus_channel_send_packet(
				dev->channel,
				vstor_packet,
				VSTOR_PKT_SIZE,
				(uint64_t)(uintptr_t)request,
				HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
				HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		goto cleanup;

	/* wait 5 seconds */
	ret = sema_timedwait(&request->synch_sema, 5 * hz);

	if (ret != 0)
		goto cleanup;

	/* TODO: Check returned version */
	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
	    vstor_packet->status != 0) {
		goto cleanup;
	}

	/* the multi-channel feature is supported on WIN8 and above */
	max_chans = vstor_packet->u.chan_props.max_channel_cnt;
	if ((hv_vmbus_protocal_version != HV_VMBUS_VERSION_WIN7) &&
	    (hv_vmbus_protocal_version != HV_VMBUS_VERSION_WS2008) &&
	    (vstor_packet->u.chan_props.flags &
	     HV_STORAGE_SUPPORTS_MULTI_CHANNEL)) {
		support_multichannel = TRUE;
	}

	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_ENDINITIALIZATION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = hv_vmbus_channel_send_packet(
			dev->channel,
			vstor_packet,
			VSTOR_PKT_SIZE,
			(uint64_t)(uintptr_t)request,
			HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
			HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0) {
		goto cleanup;
	}

	/* wait 5 seconds */
	ret = sema_timedwait(&request->synch_sema, 5 * hz);

	if (ret != 0)
		goto cleanup;

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
	    vstor_packet->status != 0)
		goto cleanup;

	/*
	 * If multi-channel is supported, send multichannel create
	 * request to host.
	 */
	if (support_multichannel)
		storvsc_send_multichannel_request(dev, max_chans);

cleanup:
	sema_destroy(&request->synch_sema);
	return (ret);
}

/**
 * @brief Open channel connection to parent partition StorVSP driver
 *
 * Open and initialize channel connection to parent partition StorVSP driver.
 *
 * @param pointer to a Hyper-V device
 * @returns 0 on success, non-zero error on failure
 */
static int
hv_storvsc_connect_vsp(struct hv_device *dev)
{
	int ret = 0;
	struct vmstor_chan_props props;
	struct storvsc_softc *sc;

	sc = device_get_softc(dev->device);

	memset(&props, 0, sizeof(struct vmstor_chan_props));

	/*
	 * Open the channel
	 */

	ret = hv_vmbus_channel_open(
		dev->channel,
		sc->hs_drv_props->drv_ringbuffer_size,
		sc->hs_drv_props->drv_ringbuffer_size,
		(void *)&props,
		sizeof(struct vmstor_chan_props),
		hv_storvsc_on_channel_callback,
		dev->channel);

	if (ret != 0) {
		return ret;
	}

	ret = hv_storvsc_channel_init(dev);

	return (ret);
}

#if HVS_HOST_RESET
static int
hv_storvsc_host_reset(struct hv_device *dev)
{
	int ret = 0;
	struct storvsc_softc *sc;

	struct hv_storvsc_request *request;
	struct vstor_packet *vstor_packet;

	sc = get_stor_device(dev, TRUE);
	if (sc == NULL) {
		return ENODEV;
	}

	request = &sc->hs_reset_req;
	request->softc = sc;
	vstor_packet = &request->vstor_packet;

	sema_init(&request->synch_sema, 0, "stor synch sema");

	vstor_packet->operation = VSTOR_OPERATION_RESETBUS;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = hv_vmbus_channel_send_packet(dev->channel,
			vstor_packet,
			VSTOR_PKT_SIZE,
			(uint64_t)(uintptr_t)&sc->hs_reset_req,
			HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
			HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0) {
		goto cleanup;
	}

	ret = sema_timedwait(&request->synch_sema, 5 * hz); /* KYS 5 seconds */

	if (ret) {
		goto cleanup;
	}


	/*
	 * At this point, all outstanding requests in the adapter
	 * should have been flushed out and returned to us
	 */

cleanup:
	sema_destroy(&request->synch_sema);
	return (ret);
}
#endif /* HVS_HOST_RESET */

/**
 * @brief Function to initiate an I/O request
 *
 * @param device Hyper-V device pointer
 * @param request pointer to a request structure
 * @returns 0 on success, non-zero error on failure
 */
static int
hv_storvsc_io_request(struct hv_device *device,
					  struct hv_storvsc_request *request)
{
	struct storvsc_softc *sc;
	struct vstor_packet *vstor_packet = &request->vstor_packet;
	struct hv_vmbus_channel* outgoing_channel = NULL;
	int ret = 0;

	sc = get_stor_device(device, TRUE);

	if (sc == NULL) {
		return ENODEV;
	}

	vstor_packet->flags |= REQUEST_COMPLETION_FLAG;

	vstor_packet->u.vm_srb.length = VSTOR_PKT_SIZE;

	vstor_packet->u.vm_srb.sense_info_len = sense_buffer_size;

	vstor_packet->u.vm_srb.transfer_len = request->data_buf.length;

	vstor_packet->operation = VSTOR_OPERATION_EXECUTESRB;

	outgoing_channel = vmbus_select_outgoing_channel(device->channel);

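	/* Drop the softc lock while the request is handed to VMBus. */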
	mtx_unlock(&request->softc->hs_lock);
	if (request->data_buf.length) {
		ret = hv_vmbus_channel_send_packet_multipagebuffer(
				outgoing_channel,
				&request->data_buf,
				vstor_packet,
				VSTOR_PKT_SIZE,
				(uint64_t)(uintptr_t)request);

	} else {
		ret = hv_vmbus_channel_send_packet(
			outgoing_channel,
			vstor_packet,
			VSTOR_PKT_SIZE,
			(uint64_t)(uintptr_t)request,
			HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
			HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}
	mtx_lock(&request->softc->hs_lock);

	if (ret != 0) {
		printf("Unable to send packet %p ret %d", vstor_packet, ret);
	} else {
		atomic_add_int(&sc->hs_num_out_reqs, 1);
	}

	return (ret);
}


/**
 * Process IO_COMPLETION_OPERATION and hand the completed
 * result up to the CAM layer.
 */
static void
hv_storvsc_on_iocompletion(struct storvsc_softc *sc,
			   struct vstor_packet *vstor_packet,
			   struct hv_storvsc_request *request)
{
	struct vmscsi_req *vm_srb;

	vm_srb = &vstor_packet->u.vm_srb;

	if (((vm_srb->scsi_status & 0xFF) == SCSI_STATUS_CHECK_COND) &&
			(vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)) {
		/* Autosense data available */

		KASSERT(vm_srb->sense_info_len <= request->sense_info_len,
				("vm_srb->sense_info_len <= "
				 "request->sense_info_len"));

		memcpy(request->sense_data, vm_srb->u.sense_data,
			vm_srb->sense_info_len);

		request->sense_info_len = vm_srb->sense_info_len;
	}

	/* Complete request by passing to the CAM layer */
	storvsc_io_done(request);
	atomic_subtract_int(&sc->hs_num_out_reqs, 1);
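	/*
	 * If a drain is in progress and this was the last outstanding
	 * request, wake up the drain waiter.
	 */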
	if (sc->hs_drain_notify && (sc->hs_num_out_reqs == 0)) {
		sema_post(&sc->hs_drain_sema);
	}
}

static void
hv_storvsc_rescan_target(struct storvsc_softc *sc)
{
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	pathid = cam_sim_path(sc->hs_sim);
	targetid = CAM_TARGET_WILDCARD;

	/*
	 * Allocate a CCB and schedule a rescan.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		printf("unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		printf("unable to create path for rescan, pathid: %u,"
		    "targetid: %u\n", pathid, targetid);
		xpt_free_ccb(ccb);
		return;
	}

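	/*
	 * targetid is always CAM_TARGET_WILDCARD here, so a full bus
	 * scan is scheduled; the XPT_SCAN_TGT arm would handle a
	 * specific target.
	 */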
	if (targetid == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_TGT;

	xpt_rescan(ccb);
}

static void
hv_storvsc_on_channel_callback(void *context)
{
	int ret = 0;
	hv_vmbus_channel *channel = (hv_vmbus_channel *)context;
	struct hv_device *device = NULL;
	struct storvsc_softc *sc;
	uint32_t bytes_recvd;
	uint64_t request_id;
	uint8_t packet[roundup2(sizeof(struct vstor_packet), 8)];
	struct hv_storvsc_request *request;
	struct vstor_packet *vstor_packet;

	device = channel->device;
	KASSERT(device, ("device is NULL"));

	sc = get_stor_device(device, FALSE);
	if (sc == NULL) {
		printf("Storvsc_error: get stor device failed.\n");
		return;
	}

	ret = hv_vmbus_channel_recv_packet(
			channel,
			packet,
			roundup2(VSTOR_PKT_SIZE, 8),
			&bytes_recvd,
			&request_id);

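	/*
	 * request_id is the transaction cookie passed to
	 * hv_vmbus_channel_send_packet(), i.e. a pointer to the
	 * originating hv_storvsc_request.
	 */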
894
	while ((ret == 0) && (bytes_recvd > 0)) {
894
	while ((ret == 0) && (bytes_recvd > 0)) {
895
		request = (struct hv_storvsc_request *)(uintptr_t)request_id;
895
		request = (struct hv_storvsc_request *)(uintptr_t)request_id;

		if ((request == &sc->hs_init_req) ||
			(request == &sc->hs_reset_req)) {
			memcpy(&request->vstor_packet, packet,
			    sizeof(struct vstor_packet));
			sema_post(&request->synch_sema);
		} else {
			vstor_packet = (struct vstor_packet *)packet;
			switch (vstor_packet->operation) {
			case VSTOR_OPERATION_COMPLETEIO:
				if (request == NULL)
					panic("VMBUS: storvsc received a "
					    "packet with NULL request id in "
					    "COMPLETEIO operation.");

				hv_storvsc_on_iocompletion(sc,
				    vstor_packet, request);
				break;
			case VSTOR_OPERATION_REMOVEDEVICE:
				printf("VMBUS: storvsc operation %d not "
				    "implemented.\n", vstor_packet->operation);
				/* TODO: implement */
				break;
			case VSTOR_OPERATION_ENUMERATE_BUS:
				hv_storvsc_rescan_target(sc);
				break;
			default:
				break;
			}
		}
		ret = hv_vmbus_channel_recv_packet(
				channel,
				packet,
				roundup2(VSTOR_PKT_SIZE, 8),
				&bytes_recvd,
				&request_id);
	}
}

/**
 * @brief StorVSC probe function
 *
 * Device probe function.  Returns 0 if the input device is a StorVSC
 * device.  Otherwise, ENXIO is returned.  If the input device is a
 * BlkVSC (paravirtual IDE) device and this support is disabled in
 * favor of the emulated ATA/IDE device, return ENXIO.
 *
 * @param dev a device
 * @returns 0 on success, ENXIO if not a matching StorVSC device
 */
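/*
 * Note: hw.ata.disk_enable is a loader tunable; setting
 * hw.ata.disk_enable=1 (e.g. in /boot/loader.conf) keeps the emulated
 * ATA/IDE disk, in which case this probe rejects the BlkVSC device.
 */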
static int
storvsc_probe(device_t dev)
{
	int ata_disk_enable = 0;
	int ret	= ENXIO;

	switch (storvsc_get_storage_type(dev)) {
	case DRIVER_BLKVSC:
		if (bootverbose)
			device_printf(dev, "DRIVER_BLKVSC-Emulated ATA/IDE probe\n");
		if (!getenv_int("hw.ata.disk_enable", &ata_disk_enable)) {
			if (bootverbose)
				device_printf(dev,
					"Enlightened ATA/IDE detected\n");
			device_set_desc(dev, g_drv_props_table[DRIVER_BLKVSC].drv_desc);
			ret = BUS_PROBE_DEFAULT;
		} else if (bootverbose)
			device_printf(dev, "Emulated ATA/IDE set (hw.ata.disk_enable set)\n");
		break;
	case DRIVER_STORVSC:
		if (bootverbose)
			device_printf(dev, "Enlightened SCSI device detected\n");
		device_set_desc(dev, g_drv_props_table[DRIVER_STORVSC].drv_desc);
		ret = BUS_PROBE_DEFAULT;
		break;
	default:
		ret = ENXIO;
	}
	return (ret);
}

/**
 * @brief StorVSC attach function
 *
 * Function responsible for allocating per-device structures,
 * setting up CAM interfaces and scanning for available LUNs to
 * be used for SCSI device peripherals.
 *
 * @param dev a device
 * @returns 0 on success or an error on failure
 */
static int
storvsc_attach(device_t dev)
{
	struct hv_device *hv_dev = vmbus_get_devctx(dev);
	enum hv_storage_type stor_type;
	struct storvsc_softc *sc;
	struct cam_devq *devq;
	int ret, i, j;
	struct hv_storvsc_request *reqp;
	struct root_hold_token *root_mount_token = NULL;
	struct hv_sgl_node *sgl_node = NULL;
	void *tmp_buff = NULL;

	/*
	 * We need to serialize storvsc attach calls.
	 */
	root_mount_token = root_mount_hold("storvsc");

	sc = device_get_softc(dev);

	stor_type = storvsc_get_storage_type(dev);

	if (stor_type == DRIVER_UNKNOWN) {
		ret = ENODEV;
		goto cleanup;
	}

	/* fill in driver specific properties */
	sc->hs_drv_props = &g_drv_props_table[stor_type];

	/* fill in device specific properties */
	sc->hs_unit	= device_get_unit(dev);
	sc->hs_dev	= hv_dev;

	LIST_INIT(&sc->hs_free_list);
	mtx_init(&sc->hs_lock, "hvslck", NULL, MTX_DEF);

	for (i = 0; i < sc->hs_drv_props->drv_max_ios_per_target; ++i) {
		reqp = malloc(sizeof(struct hv_storvsc_request),
				 M_DEVBUF, M_WAITOK|M_ZERO);
		reqp->softc = sc;

		LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
	}

	/* create sg-list page pool */
	if (FALSE == g_hv_sgl_page_pool.is_init) {
		g_hv_sgl_page_pool.is_init = TRUE;
		LIST_INIT(&g_hv_sgl_page_pool.in_use_sgl_list);
		LIST_INIT(&g_hv_sgl_page_pool.free_sgl_list);

		/*
		 * Pre-create the SG lists: each SG list has
		 * HV_MAX_MULTIPAGE_BUFFER_COUNT segments, and each
		 * segment is backed by one page buffer.
		 */
		for (i = 0; i < STORVSC_MAX_IO_REQUESTS; i++) {
			sgl_node = malloc(sizeof(struct hv_sgl_node),
			    M_DEVBUF, M_WAITOK|M_ZERO);

			sgl_node->sgl_data =
			    sglist_alloc(HV_MAX_MULTIPAGE_BUFFER_COUNT,
			    M_WAITOK|M_ZERO);

			for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++) {
				tmp_buff = malloc(PAGE_SIZE,
				    M_DEVBUF, M_WAITOK|M_ZERO);

				sgl_node->sgl_data->sg_segs[j].ss_paddr =
				    (vm_paddr_t)tmp_buff;
			}

			LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list,
			    sgl_node, link);
		}
	}
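
	/*
	 * The pool above is sized once for the whole system:
	 * STORVSC_MAX_IO_REQUESTS SG lists, each backed by
	 * HV_MAX_MULTIPAGE_BUFFER_COUNT page-sized buffers, i.e.
	 * STORVSC_MAX_IO_REQUESTS * HV_MAX_MULTIPAGE_BUFFER_COUNT *
	 * PAGE_SIZE bytes of wired memory shared by all storvsc instances.
	 */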

	sc->hs_destroy = FALSE;
	sc->hs_drain_notify = FALSE;
	sema_init(&sc->hs_drain_sema, 0, "Store Drain Sema");

	ret = hv_storvsc_connect_vsp(hv_dev);
	if (ret != 0) {
		goto cleanup;
	}

	/*
	 * Create the device queue.
	 * Hyper-V maps each target to one SCSI HBA.
	 */
	devq = cam_simq_alloc(sc->hs_drv_props->drv_max_ios_per_target);
	if (devq == NULL) {
		device_printf(dev, "Failed to alloc device queue\n");
		ret = ENOMEM;
		goto cleanup;
	}

	sc->hs_sim = cam_sim_alloc(storvsc_action,
				storvsc_poll,
				sc->hs_drv_props->drv_name,
				sc,
				sc->hs_unit,
				&sc->hs_lock, 1,
				sc->hs_drv_props->drv_max_ios_per_target,
				devq);

	if (sc->hs_sim == NULL) {
		device_printf(dev, "Failed to alloc sim\n");
		cam_simq_free(devq);
		ret = ENOMEM;
		goto cleanup;
	}

	mtx_lock(&sc->hs_lock);
	/* bus_id is set to 0, need to get it from VMBUS channel query? */
	if (xpt_bus_register(sc->hs_sim, dev, 0) != CAM_SUCCESS) {
		cam_sim_free(sc->hs_sim, /*free_devq*/TRUE);
		mtx_unlock(&sc->hs_lock);
		device_printf(dev, "Unable to register SCSI bus\n");
		ret = ENXIO;
		goto cleanup;
	}

	if (xpt_create_path(&sc->hs_path, /*periph*/NULL,
		 cam_sim_path(sc->hs_sim),
		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sc->hs_sim));
		cam_sim_free(sc->hs_sim, /*free_devq*/TRUE);
		mtx_unlock(&sc->hs_lock);
		device_printf(dev, "Unable to create path\n");
		ret = ENXIO;
		goto cleanup;
	}

	mtx_unlock(&sc->hs_lock);

	root_mount_rel(root_mount_token);
	return (0);


cleanup:
	root_mount_rel(root_mount_token);
	while (!LIST_EMPTY(&sc->hs_free_list)) {
		reqp = LIST_FIRST(&sc->hs_free_list);
		LIST_REMOVE(reqp, link);
		free(reqp, M_DEVBUF);
	}

	while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
		sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
		LIST_REMOVE(sgl_node, link);
		for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++) {
			if (NULL !=
			    (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) {
				free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr,
				    M_DEVBUF);
			}
		}
		sglist_free(sgl_node->sgl_data);
		free(sgl_node, M_DEVBUF);
	}

	return (ret);
}

/**
 * @brief StorVSC device detach function
 *
 * This function is responsible for safely detaching a
 * StorVSC device.  This includes waiting for inbound responses
 * to complete and freeing associated per-device structures.
 *
 * @param dev a device
 * @returns 0 on success
 */
static int
storvsc_detach(device_t dev)
{
	struct storvsc_softc *sc = device_get_softc(dev);
	struct hv_storvsc_request *reqp = NULL;
	struct hv_device *hv_device = vmbus_get_devctx(dev);
	struct hv_sgl_node *sgl_node = NULL;
	int j = 0;

	sc->hs_destroy = TRUE;

	/*
	 * At this point, all outbound traffic should be disabled. We
	 * only allow inbound traffic (responses) to proceed so that
	 * outstanding requests can be completed.
	 */

	sc->hs_drain_notify = TRUE;
	sema_wait(&sc->hs_drain_sema);
	sc->hs_drain_notify = FALSE;
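
	/*
	 * hs_drain_sema is presumably posted by the I/O completion path once
	 * the last outstanding request finishes while hs_drain_notify is
	 * set, so the wait above returns only after the device is idle.
	 */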

	/*
	 * Since we have already drained, we don't need to busy wait.
	 * The call to close the channel will reset the callback
	 * under the protection of the incoming channel lock.
	 */

	hv_vmbus_channel_close(hv_device->channel);

	mtx_lock(&sc->hs_lock);
	while (!LIST_EMPTY(&sc->hs_free_list)) {
		reqp = LIST_FIRST(&sc->hs_free_list);
		LIST_REMOVE(reqp, link);

		free(reqp, M_DEVBUF);
	}
	mtx_unlock(&sc->hs_lock);

	while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
		sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
		LIST_REMOVE(sgl_node, link);
		for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++) {
			if (NULL !=
			    (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) {
				free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr,
				    M_DEVBUF);
			}
		}
		sglist_free(sgl_node->sgl_data);
		free(sgl_node, M_DEVBUF);
	}

	return (0);
}

#if HVS_TIMEOUT_TEST
/**
 * @brief unit test for timed out operations
 *
 * This function provides unit testing capability to simulate
 * timed out operations.  Recompilation with HVS_TIMEOUT_TEST=1
 * is required.
 *
 * @param reqp pointer to a request structure
 * @param opcode SCSI operation being performed
 * @param wait if 1, wait for I/O to complete
 */
static void
storvsc_timeout_test(struct hv_storvsc_request *reqp,
		uint8_t opcode, int wait)
{
	int ret;
	union ccb *ccb = reqp->ccb;
	struct storvsc_softc *sc = reqp->softc;

	if (reqp->vstor_packet.vm_srb.cdb[0] != opcode) {
		return;
	}

	if (wait) {
		mtx_lock(&reqp->event.mtx);
	}
	ret = hv_storvsc_io_request(sc->hs_dev, reqp);
	if (ret != 0) {
		if (wait) {
			mtx_unlock(&reqp->event.mtx);
		}
		printf("%s: io_request failed with %d.\n",
				__func__, ret);
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		mtx_lock(&sc->hs_lock);
		storvsc_free_request(sc, reqp);
		xpt_done(ccb);
		mtx_unlock(&sc->hs_lock);
		return;
	}

	if (wait) {
		xpt_print(ccb->ccb_h.path,
				"%u: %s: waiting for IO return.\n",
				ticks, __func__);
		ret = cv_timedwait(&reqp->event.cv, &reqp->event.mtx, 60*hz);
		mtx_unlock(&reqp->event.mtx);
		xpt_print(ccb->ccb_h.path, "%u: %s: %s.\n",
				ticks, __func__, (ret == 0)?
				"IO return detected" :
				"IO return not detected");
		/*
		 * Now both the timer handler and io done are running
		 * simultaneously. We want to confirm that io done always
		 * finishes after the timer handler exits, so that the reqp
		 * used by the timer handler is neither freed nor stale.
		 * Busy-loop for another 1/10 second to make sure io done
		 * does wait for the timer handler to complete.
		 */
		DELAY(100*1000);
		mtx_lock(&sc->hs_lock);
		xpt_print(ccb->ccb_h.path,
				"%u: %s: finishing, queue frozen %d, "
				"ccb status 0x%x scsi_status 0x%x.\n",
				ticks, __func__, sc->hs_frozen,
				ccb->ccb_h.status,
				ccb->csio.scsi_status);
		mtx_unlock(&sc->hs_lock);
	}
}
#endif /* HVS_TIMEOUT_TEST */

#ifdef notyet
/**
 * @brief timeout handler for requests
 *
 * This function is called as a result of a callout expiring.
 *
 * @param arg pointer to a request
 */
static void
storvsc_timeout(void *arg)
{
	struct hv_storvsc_request *reqp = arg;
	struct storvsc_softc *sc = reqp->softc;
	union ccb *ccb = reqp->ccb;

	if (reqp->retries == 0) {
		mtx_lock(&sc->hs_lock);
		xpt_print(ccb->ccb_h.path,
		    "%u: IO timed out (req=0x%p), wait for another %u secs.\n",
		    ticks, reqp, ccb->ccb_h.timeout / 1000);
		cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL);
		mtx_unlock(&sc->hs_lock);

		reqp->retries++;
		callout_reset_sbt(&reqp->callout, SBT_1MS * ccb->ccb_h.timeout,
		    0, storvsc_timeout, reqp, 0);
#if HVS_TIMEOUT_TEST
		storvsc_timeout_test(reqp, SEND_DIAGNOSTIC, 0);
#endif
		return;
	}

	mtx_lock(&sc->hs_lock);
	xpt_print(ccb->ccb_h.path,
		"%u: IO (reqp = 0x%p) did not return for %u seconds, %s.\n",
		ticks, reqp, ccb->ccb_h.timeout * (reqp->retries+1) / 1000,
		(sc->hs_frozen == 0)?
		"freezing the queue" : "the queue is already frozen");
	if (sc->hs_frozen == 0) {
		sc->hs_frozen = 1;
		xpt_freeze_simq(xpt_path_sim(ccb->ccb_h.path), 1);
	}
	mtx_unlock(&sc->hs_lock);

#if HVS_TIMEOUT_TEST
	storvsc_timeout_test(reqp, MODE_SELECT_10, 1);
#endif
}
#endif

/**
 * @brief StorVSC device poll function
 *
 * This function is responsible for servicing requests when
 * interrupts are disabled (i.e., when we are dumping core).
 *
 * @param sim a pointer to a CAM SCSI interface module
 */
static void
storvsc_poll(struct cam_sim *sim)
{
	struct storvsc_softc *sc = cam_sim_softc(sim);

	mtx_assert(&sc->hs_lock, MA_OWNED);
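	/*
	 * Presumably the channel callback takes hs_lock itself on the
	 * completion path, so the SIM lock is dropped around the call
	 * below to avoid recursing on it.
	 */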
	mtx_unlock(&sc->hs_lock);
	hv_storvsc_on_channel_callback(sc->hs_dev->channel);
	mtx_lock(&sc->hs_lock);
}

/**
 * @brief StorVSC device action function
 *
 * This function is responsible for handling SCSI operations which
 * are passed from the CAM layer.  The requests are in the form of
 * CAM control blocks which indicate the action being performed.
 * Not all actions require converting the request to a VSCSI protocol
 * message - these actions can be responded to by this driver.
 * Requests which are destined for a backend storage device are converted
 * to a VSCSI protocol message and sent on the channel connection associated
 * with this device.
 *
 * @param sim pointer to a CAM SCSI interface module
 * @param ccb pointer to a CAM control block
 */
static void
storvsc_action(struct cam_sim *sim, union ccb *ccb)
{
	struct storvsc_softc *sc = cam_sim_softc(sim);
	int res;

	mtx_assert(&sc->hs_lock, MA_OWNED);
	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ: {
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_TAG_ABLE|PI_SDTR_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = STORVSC_MAX_TARGETS;
		cpi->max_lun = sc->hs_drv_props->drv_max_luns_per_target;
		cpi->initiator_id = cpi->max_target;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 300000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC2;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, sc->hs_drv_props->drv_name, HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	}
	case XPT_GET_TRAN_SETTINGS: {
		struct ccb_trans_settings *cts = &ccb->cts;

		cts->transport = XPORT_SAS;
		cts->transport_version = 0;
		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_SPC2;

		/* enable tag queuing and disconnected mode */
		cts->proto_specific.valid = CTS_SCSI_VALID_TQ;
		cts->proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
		cts->proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
		cts->xport_specific.valid = CTS_SPI_VALID_DISC;
		cts->xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	}
	case XPT_SET_TRAN_SETTINGS: {
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
	}
	case XPT_CALC_GEOMETRY: {
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		return;
	}
	case XPT_RESET_BUS:
	case XPT_RESET_DEV: {
#if HVS_HOST_RESET
		if ((res = hv_storvsc_host_reset(sc->hs_dev)) != 0) {
			xpt_print(ccb->ccb_h.path,
				"hv_storvsc_host_reset failed with %d\n", res);
			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
			xpt_done(ccb);
			return;
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return;
#else
		xpt_print(ccb->ccb_h.path,
				  "%s reset not supported.\n",
				  (ccb->ccb_h.func_code == XPT_RESET_BUS)?
				  "bus" : "dev");
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
#endif	/* HVS_HOST_RESET */
	}
	case XPT_SCSI_IO:
	case XPT_IMMED_NOTIFY: {
		struct hv_storvsc_request *reqp = NULL;

		if (ccb->csio.cdb_len == 0) {
			panic("cdb_len is 0\n");
		}

		if (LIST_EMPTY(&sc->hs_free_list)) {
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			if (sc->hs_frozen == 0) {
				sc->hs_frozen = 1;
				xpt_freeze_simq(sim, /* count*/1);
			}
			xpt_done(ccb);
			return;
		}
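		/*
		 * CAM_REQUEUE_REQ asks the XPT to resubmit the CCB once the
		 * SIM queue is released; freezing the queue above holds off
		 * further I/O, presumably until a completed request returns
		 * to the free list and the queue is released.
		 */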

		reqp = LIST_FIRST(&sc->hs_free_list);
		LIST_REMOVE(reqp, link);

		bzero(reqp, sizeof(struct hv_storvsc_request));
		reqp->softc = sc;

		ccb->ccb_h.status |= CAM_SIM_QUEUED;
		if ((res = create_storvsc_request(ccb, reqp)) != 0) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

#ifdef notyet
		if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
			callout_init(&reqp->callout, 1);
			callout_reset_sbt(&reqp->callout,
			    SBT_1MS * ccb->ccb_h.timeout, 0,
			    storvsc_timeout, reqp, 0);
#if HVS_TIMEOUT_TEST
			cv_init(&reqp->event.cv, "storvsc timeout cv");
			mtx_init(&reqp->event.mtx, "storvsc timeout mutex",
					NULL, MTX_DEF);
			switch (reqp->vstor_packet.vm_srb.cdb[0]) {
				case MODE_SELECT_10:
				case SEND_DIAGNOSTIC:
					/* To have the timer send the request. */
					return;
				default:
					break;
			}
#endif /* HVS_TIMEOUT_TEST */
		}
#endif

		if ((res = hv_storvsc_io_request(sc->hs_dev, reqp)) != 0) {
			xpt_print(ccb->ccb_h.path,
				"hv_storvsc_io_request failed with %d\n", res);
			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
			storvsc_free_request(sc, reqp);
			xpt_done(ccb);
			return;
		}
		return;
	}

	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}
}

/**
 * @brief destroy bounce buffer
 *
 * This function is responsible for destroying a Scatter/Gather list
 * that was created by storvsc_create_bounce_buffer().
 *
 * @param sgl - the Scatter/Gather list to be destroyed
 */
static void
storvsc_destroy_bounce_buffer(struct sglist *sgl)
{
	struct hv_sgl_node *sgl_node = NULL;

	if (LIST_EMPTY(&g_hv_sgl_page_pool.in_use_sgl_list)) {
		printf("storvsc error: not enough in use sgl\n");
		return;
	}
	sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.in_use_sgl_list);
	LIST_REMOVE(sgl_node, link);
	sgl_node->sgl_data = sgl;
	LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list, sgl_node, link);
}

/**
 * @brief create bounce buffer
 *
 * This function is responsible for creating a Scatter/Gather list
 * whose segments are each backed by a page-aligned page buffer.
 *
 * @param seg_count - SG-list segment count
 * @param write - if WRITE_TYPE, set the used size of each SG list page
 * to 0, otherwise set the used size to the page size.
 *
 * return NULL if the creation failed
 */
static struct sglist *
storvsc_create_bounce_buffer(uint16_t seg_count, int write)
{
	int i = 0;
	struct sglist *bounce_sgl = NULL;
	unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
	struct hv_sgl_node *sgl_node = NULL;

	/* get struct sglist from free_sgl_list */
	if (LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
		printf("storvsc error: not enough free sgl\n");
		return (NULL);
	}
	sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
	LIST_REMOVE(sgl_node, link);
	bounce_sgl = sgl_node->sgl_data;
	LIST_INSERT_HEAD(&g_hv_sgl_page_pool.in_use_sgl_list, sgl_node, link);

	bounce_sgl->sg_maxseg = seg_count;

	if (write == WRITE_TYPE)
		bounce_sgl->sg_nseg = 0;
	else
		bounce_sgl->sg_nseg = seg_count;

	for (i = 0; i < seg_count; i++)
		bounce_sgl->sg_segs[i].ss_len = buf_len;
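
	/*
	 * For reads, the full page of every segment is made available up
	 * front (buf_len == PAGE_SIZE); for writes the lengths start at 0
	 * and are presumably filled in later, e.g. by
	 * storvsc_copy_sgl_to_bounce_buf(), with the byte counts of the
	 * bounced original segments.
	 */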

	return (bounce_sgl);
}

/**
 * @brief copy data from SG list to bounce buffer
 *
 * This function is responsible for copying data from one SG list's
 * segments to another SG list that is used as a bounce buffer.
 *
 * @param bounce_sgl - the destination SG list
 * @param orig_sgl - the segments of the source SG list.
 * @param orig_sgl_count - the count of segments.
 * @param seg_bits - indicates which segments need the bounce buffer;
 *  a set bit means the segment is bounced.
 */
static void
storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl,
			       bus_dma_segment_t *orig_sgl,
			       unsigned int orig_sgl_count,
			       uint64_t seg_bits)
{
	int src_sgl_idx = 0;

	for (src_sgl_idx = 0; src_sgl_idx < orig_sgl_count; src_sgl_idx++) {
		if (seg_bits & (1 << src_sgl_idx)) {
			memcpy((void*)bounce_sgl->sg_segs[src_sgl_idx].ss_paddr,
			    (void*)orig_sgl[src_sgl_idx].ds_addr,
			    orig_sgl[src_sgl_idx].ds_len);

			bounce_sgl->sg_segs[src_sgl_idx].ss_len =
			    orig_sgl[src_sgl_idx].ds_len;
		}
	}
}

/**
 * @brief copy data from an SG list used as a bounce buffer to another SG list
 *
 * This function is responsible for copying data from an SG list with a
 * bounce buffer back to another SG list's segments.
 *
 * @param dest_sgl - the destination SG list's segments
 * @param dest_sgl_count - the count of the destination SG list's segments.
 * @param src_sgl - the source SG list.
 * @param seg_bits - indicates which segments of the source SG list used
 * the bounce buffer.
 */
void
storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl,
				    unsigned int dest_sgl_count,
				    struct sglist* src_sgl,
				    uint64_t seg_bits)
{
	int sgl_idx = 0;

	for (sgl_idx = 0; sgl_idx < dest_sgl_count; sgl_idx++) {
		if (seg_bits & (1 << sgl_idx)) {
			memcpy((void*)(dest_sgl[sgl_idx].ds_addr),
			    (void*)(src_sgl->sg_segs[sgl_idx].ss_paddr),
			    src_sgl->sg_segs[sgl_idx].ss_len);
		}
	}
}

/**
 * @brief check whether an SG list needs a bounce buffer
 *
 * This function is responsible for checking whether a bounce buffer
 * is needed for the SG list.
 *
 * @param sgl - the SG list's segments
 * @param sg_count - the count of the SG list's segments.
 * @param bits - set bits mark the segments that need a bounce buffer
 *
 * return -1 if the SG list does not need a bounce buffer
 */
static int
storvsc_check_bounce_buffer_sgl(bus_dma_segment_t *sgl,
				unsigned int sg_count,
				uint64_t *bits)
{
	int i = 0;
	int offset = 0;
	uint64_t phys_addr = 0;
	uint64_t tmp_bits = 0;
	boolean_t found_hole = FALSE;
	boolean_t pre_aligned = TRUE;

	if (sg_count < 2) {
		return (-1);
	}

	*bits = 0;

	phys_addr = vtophys(sgl[0].ds_addr);
	offset = phys_addr - trunc_page(phys_addr);

	if (offset != 0) {
		pre_aligned = FALSE;
		tmp_bits |= 1;
	}

	for (i = 1; i < sg_count; i++) {
		phys_addr = vtophys(sgl[i].ds_addr);
		offset = phys_addr - trunc_page(phys_addr);

		if (offset == 0) {
			if (FALSE == pre_aligned) {
				/*
				 * This segment is aligned but the previous
				 * one is not: we found a hole.
				 */
				found_hole = TRUE;
			}
			pre_aligned = TRUE;
		} else {
			tmp_bits |= 1 << i;
			if (!pre_aligned) {
				if (phys_addr != vtophys(sgl[i-1].ds_addr +
				    sgl[i-1].ds_len)) {
					/*
					 * Check whether this segment is
					 * physically contiguous with the
					 * previous one; if not, we found
					 * a hole.
					 */
					found_hole = TRUE;
				}
			} else {
				found_hole = TRUE;
			}
			pre_aligned = FALSE;
		}
	}

	if (!found_hole) {
		return (-1);
	} else {
		*bits = tmp_bits;
		return (0);
	}
}
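
/*
 * Illustrative example of the hole detection above (with PAGE_SIZE 4096):
 * for a three-segment list whose physical addresses are 0x1800, 0x2000 and
 * 0x5400, segments 0 and 2 are misaligned, so tmp_bits becomes 0x5; the
 * aligned segment following a misaligned one marks a hole, so the function
 * returns 0 and the caller sets up a bounce buffer.
 */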

/**
 * @brief Fill in a request structure based on a CAM control block
 *
 * Fills in a request structure based on the contents of a CAM control
 * block.  The request structure holds the payload information for a
 * VSCSI protocol request.
 *
 * @param ccb pointer to a CAM control block
 * @param reqp pointer to a request structure
 */
static int
create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp)
{
	struct ccb_scsiio *csio = &ccb->csio;
	uint64_t phys_addr;
	uint32_t bytes_to_copy = 0;
	uint32_t pfn_num = 0;
	uint32_t pfn;
	uint64_t not_aligned_seg_bits = 0;

	/* refer to struct vmscsi_req for meanings of these two fields */
	reqp->vstor_packet.u.vm_srb.port =
		cam_sim_unit(xpt_path_sim(ccb->ccb_h.path));
	reqp->vstor_packet.u.vm_srb.path_id =
		cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));

	reqp->vstor_packet.u.vm_srb.target_id = ccb->ccb_h.target_id;
	reqp->vstor_packet.u.vm_srb.lun = ccb->ccb_h.target_lun;

	reqp->vstor_packet.u.vm_srb.cdb_len = csio->cdb_len;
	if (ccb->ccb_h.flags & CAM_CDB_POINTER) {
		memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_ptr,
			csio->cdb_len);
	} else {
		memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_bytes,
			csio->cdb_len);
	}

	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_OUT:
		reqp->vstor_packet.u.vm_srb.data_in = WRITE_TYPE;
		break;
	case CAM_DIR_IN:
		reqp->vstor_packet.u.vm_srb.data_in = READ_TYPE;
		break;
	case CAM_DIR_NONE:
		reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE;
		break;
	default:
		reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE;
		break;
	}

	reqp->sense_data     = &csio->sense_data;
	reqp->sense_info_len = csio->sense_len;

	reqp->ccb = ccb;

	if (0 == csio->dxfer_len) {
		return (0);
	}

	reqp->data_buf.length = csio->dxfer_len;

	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_VADDR:
	{
		bytes_to_copy = csio->dxfer_len;
		phys_addr = vtophys(csio->data_ptr);
		reqp->data_buf.offset = phys_addr & PAGE_MASK;

		while (bytes_to_copy != 0) {
			int bytes, page_offset;
			phys_addr =
			    vtophys(&csio->data_ptr[reqp->data_buf.length -
			    bytes_to_copy]);
			pfn = phys_addr >> PAGE_SHIFT;
			reqp->data_buf.pfn_array[pfn_num] = pfn;
			page_offset = phys_addr & PAGE_MASK;

			bytes = min(PAGE_SIZE - page_offset, bytes_to_copy);

			bytes_to_copy -= bytes;
			pfn_num++;
		}
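
		/*
		 * Illustrative example: a 10000-byte buffer starting 0x200
		 * bytes into a page spans three pages when PAGE_SIZE is
		 * 4096, so the loop above records three PFNs and
		 * data_buf.offset is 0x200; only the first and last pages
		 * are partially used.
		 */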
		break;
	}

	case CAM_DATA_SG:
	{
		int i = 0;
		int offset = 0;
		int ret;

		bus_dma_segment_t *storvsc_sglist =
		    (bus_dma_segment_t *)ccb->csio.data_ptr;
		u_int16_t storvsc_sg_count = ccb->csio.sglist_cnt;

		printf("Storvsc: get SG I/O operation, %d\n",
		    reqp->vstor_packet.u.vm_srb.data_in);

		if (storvsc_sg_count > HV_MAX_MULTIPAGE_BUFFER_COUNT) {
			printf("Storvsc: %d segments are too many; "
			    "only %d segments are supported\n",
			    storvsc_sg_count, HV_MAX_MULTIPAGE_BUFFER_COUNT);
			return (EINVAL);
		}

		/*
		 * We create our own bounce buffer function currently.
		 * Ideally we should use the BUS_DMA(9) framework, but with
		 * the current BUS_DMA code there is no callback API to
		 * check the page alignment of middle segments before busdma
		 * can decide if a bounce buffer is needed for a particular
		 * segment. There is a callback, "bus_dma_filter_t *filter",
		 * but its parameters are not sufficient for the storvsc
		 * driver.
		 * TODO:
		 *	Add page alignment check in BUS_DMA(9) callback. Once
		 *	this is complete, switch the following code to use
		 *	BUS_DMA(9) for storvsc bounce buffer support.
		 */
		/* check if we need to create bounce buffer */
		ret = storvsc_check_bounce_buffer_sgl(storvsc_sglist,
		    storvsc_sg_count, &not_aligned_seg_bits);
		if (ret != -1) {
			reqp->bounce_sgl =
			    storvsc_create_bounce_buffer(storvsc_sg_count,
			    reqp->vstor_packet.u.vm_srb.data_in);
			if (NULL == reqp->bounce_sgl) {
				printf("Storvsc_error: "
				    "create bounce buffer failed.\n");
				return (ENOMEM);
			}

			reqp->bounce_sgl_count = storvsc_sg_count;
			reqp->not_aligned_seg_bits = not_aligned_seg_bits;

			/*
			 * If it is a write, we need to copy the original
			 * data to the bounce buffer.
			 */
			if (WRITE_TYPE == reqp->vstor_packet.u.vm_srb.data_in) {
				storvsc_copy_sgl_to_bounce_buf(
				    reqp->bounce_sgl,
				    storvsc_sglist,
				    storvsc_sg_count,
				    reqp->not_aligned_seg_bits);
			}

			/* translate virtual addresses to physical frame numbers */
			if (reqp->not_aligned_seg_bits & 0x1) {
				phys_addr =
				    vtophys(reqp->bounce_sgl->sg_segs[0].ss_paddr);
			} else {
				phys_addr =
					vtophys(storvsc_sglist[0].ds_addr);
1890
			}
1890
			}
1891
			reqp->data_buf.offset = phys_addr & PAGE_MASK;
1891
			reqp->data_buf.offset = phys_addr & PAGE_MASK;
1892
1892
1893
			pfn = phys_addr >> PAGE_SHIFT;
1893
			pfn = phys_addr >> PAGE_SHIFT;
1894
			reqp->data_buf.pfn_array[0] = pfn;
1894
			reqp->data_buf.pfn_array[0] = pfn;
1895
			
1895
			
1896
			for (i = 1; i < storvsc_sg_count; i++) {
1896
			for (i = 1; i < storvsc_sg_count; i++) {
1897
				if (reqp->not_aligned_seg_bits & (1 << i)) {
1897
				if (reqp->not_aligned_seg_bits & (1 << i)) {
1898
					phys_addr =
1898
					phys_addr =
1899
					    vtophys(reqp->bounce_sgl->sg_segs[i].ss_paddr);
1899
					    vtophys(reqp->bounce_sgl->sg_segs[i].ss_paddr);
1900
				} else {
1900
				} else {
1901
					phys_addr =
1901
					phys_addr =
1902
					    vtophys(storvsc_sglist[i].ds_addr);
1902
					    vtophys(storvsc_sglist[i].ds_addr);
1903
				}
1903
				}
1904
1904
1905
				pfn = phys_addr >> PAGE_SHIFT;
1905
				pfn = phys_addr >> PAGE_SHIFT;
1906
				reqp->data_buf.pfn_array[i] = pfn;
1906
				reqp->data_buf.pfn_array[i] = pfn;
1907
			}
1907
			}
1908
		} else {
1908
		} else {
1909
			phys_addr = vtophys(storvsc_sglist[0].ds_addr);
1909
			phys_addr = vtophys(storvsc_sglist[0].ds_addr);
1910
1910
1911
			reqp->data_buf.offset = phys_addr & PAGE_MASK;
1911
			reqp->data_buf.offset = phys_addr & PAGE_MASK;
1912
1912
1913
			for (i = 0; i < storvsc_sg_count; i++) {
1913
			for (i = 0; i < storvsc_sg_count; i++) {
1914
				phys_addr = vtophys(storvsc_sglist[i].ds_addr);
1914
				phys_addr = vtophys(storvsc_sglist[i].ds_addr);
1915
				pfn = phys_addr >> PAGE_SHIFT;
1915
				pfn = phys_addr >> PAGE_SHIFT;
1916
				reqp->data_buf.pfn_array[i] = pfn;
1916
				reqp->data_buf.pfn_array[i] = pfn;
1917
			}
1917
			}
1918
1918
1919
			/* check the last segment cross boundary or not */
1919
			/* check the last segment cross boundary or not */
1920
			offset = phys_addr & PAGE_MASK;
1920
			offset = phys_addr & PAGE_MASK;
1921
			if (offset) {
1921
			if (offset) {
1922
				phys_addr =
1922
				phys_addr =
1923
				    vtophys(storvsc_sglist[i-1].ds_addr +
1923
				    vtophys(storvsc_sglist[i-1].ds_addr +
1924
				    PAGE_SIZE - offset);
1924
				    PAGE_SIZE - offset);
1925
				pfn = phys_addr >> PAGE_SHIFT;
1925
				pfn = phys_addr >> PAGE_SHIFT;
1926
				reqp->data_buf.pfn_array[i] = pfn;
1926
				reqp->data_buf.pfn_array[i] = pfn;
1927
			}
1927
			}
1928
			
1928
			
1929
			reqp->bounce_sgl_count = 0;
1929
			reqp->bounce_sgl_count = 0;
1930
		}
1930
		}
1931
		break;
1931
		break;
1932
	}
1932
	}
1933
	default:
1933
	default:
1934
		printf("Unknow flags: %d\n", ccb->ccb_h.flags);
1934
		printf("Unknow flags: %d\n", ccb->ccb_h.flags);
1935
		return(EINVAL);
1935
		return(EINVAL);
1936
	}
1936
	}
1937
1937
1938
	return(0);
1938
	return(0);
1939
}
1939
}
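
A standalone illustration of the CAM_DATA_VADDR page walk above, as a minimal
sketch: fake_vtophys() stands in for the kernel's vtophys() with an identity
mapping, and FAKE_PAGE_* and fill_pfn_array() are hypothetical names, not part
of the driver.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_SIZE	4096u
#define FAKE_PAGE_MASK	(FAKE_PAGE_SIZE - 1)
#define FAKE_PAGE_SHIFT	12

/* Identity-mapped stand-in for the kernel's vtophys(). */
static uintptr_t
fake_vtophys(uintptr_t va)
{
	return (va);
}

/*
 * Mirror of the CAM_DATA_VADDR loop above: walk the buffer one page
 * at a time, record one PFN per touched page, and consume only the
 * bytes that fit in the current page on each iteration.
 */
static int
fill_pfn_array(uintptr_t va, size_t len, uint64_t *pfn_array)
{
	size_t bytes_to_copy = len;
	int pfn_num = 0;

	while (bytes_to_copy != 0) {
		uintptr_t pa = fake_vtophys(va + (len - bytes_to_copy));
		size_t page_offset = pa & FAKE_PAGE_MASK;
		size_t bytes = FAKE_PAGE_SIZE - page_offset;

		if (bytes > bytes_to_copy)
			bytes = bytes_to_copy;
		pfn_array[pfn_num++] = pa >> FAKE_PAGE_SHIFT;
		bytes_to_copy -= bytes;
	}
	return (pfn_num);
}

int
main(void)
{
	uint64_t pfns[8];

	/* A 9000-byte transfer starting 100 bytes into a page spans
	 * three pages, so three PFNs are recorded. */
	int n = fill_pfn_array(0x10000 + 100, 9000, pfns);

	printf("pages touched: %d\n", n);	/* prints 3 */
	return (0);
}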

/*
 * Modified from scsi_print_inquiry(), which is responsible for
 * printing the detailed information in a scsi_inquiry_data structure.
 *
 * Return 1 if the inquiry data is valid, 0 otherwise.
 */
static inline int
is_inquiry_valid(const struct scsi_inquiry_data *inq_data)
{
	uint8_t type;
	char vendor[16], product[48], revision[16];

	/*
	 * Check device type and qualifier
	 */
	if (!(SID_QUAL_IS_VENDOR_UNIQUE(inq_data) ||
	    SID_QUAL(inq_data) == SID_QUAL_LU_CONNECTED))
		return (0);

	type = SID_TYPE(inq_data);
	switch (type) {
	case T_DIRECT:
	case T_SEQUENTIAL:
	case T_PRINTER:
	case T_PROCESSOR:
	case T_WORM:
	case T_CDROM:
	case T_SCANNER:
	case T_OPTICAL:
	case T_CHANGER:
	case T_COMM:
	case T_STORARRAY:
	case T_ENCLOSURE:
	case T_RBC:
	case T_OCRW:
	case T_OSD:
	case T_ADC:
		break;
	case T_NODEVICE:
	default:
		return (0);
	}

	/*
	 * Check vendor, product, and revision
	 */
	cam_strvis(vendor, inq_data->vendor, sizeof(inq_data->vendor),
	    sizeof(vendor));
	cam_strvis(product, inq_data->product, sizeof(inq_data->product),
	    sizeof(product));
	cam_strvis(revision, inq_data->revision, sizeof(inq_data->revision),
	    sizeof(revision));
	if (strlen(vendor) == 0  ||
	    strlen(product) == 0 ||
	    strlen(revision) == 0)
		return (0);

	return (1);
}
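
The bogus inquiry data this validation targets (see the hunk in
storvsc_io_done() below) already fails on its very first byte. A minimal
sketch of that decoding, where QUAL() and TYPE() are hypothetical stand-ins
mirroring the bit layout of CAM's SID_QUAL() and SID_TYPE():

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the qualifier/type extraction on byte 0 of INQUIRY data. */
#define QUAL(b0)	(((b0) >> 5) & 0x7)
#define TYPE(b0)	((b0) & 0x1f)

int
main(void)
{
	/* First byte of the invalid response the host returns for an
	 * unpopulated slot: [0x7f 0x0 ...]. */
	uint8_t b0 = 0x7f;

	/* 0x7f decodes to qualifier 3 (LU not supported) and peripheral
	 * type 0x1f (T_NODEVICE), so is_inquiry_valid() returns 0. */
	printf("qualifier=%u type=0x%02x\n", QUAL(b0), TYPE(b0));
	return (0);
}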

/**
 * @brief completion function before returning to CAM
 *
 * The I/O has been completed and the result needs
 * to be passed to the CAM layer.
 * Free resources related to this request.
 *
 * @param reqp pointer to a request structure
 */
static void
storvsc_io_done(struct hv_storvsc_request *reqp)
{
	union ccb *ccb = reqp->ccb;
	struct ccb_scsiio *csio = &ccb->csio;
	struct storvsc_softc *sc = reqp->softc;
	struct vmscsi_req *vm_srb = &reqp->vstor_packet.u.vm_srb;
	bus_dma_segment_t *ori_sglist = NULL;
	int ori_sg_count = 0;

	/* destroy the bounce buffer if one was used */
	if (reqp->bounce_sgl_count) {
		ori_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr;
		ori_sg_count = ccb->csio.sglist_cnt;

		/*
		 * If it is a READ operation, copy the data back
		 * to the original SG list.
		 */
		if (READ_TYPE == reqp->vstor_packet.u.vm_srb.data_in) {
			storvsc_copy_from_bounce_buf_to_sgl(ori_sglist,
			    ori_sg_count,
			    reqp->bounce_sgl,
			    reqp->not_aligned_seg_bits);
		}

		storvsc_destroy_bounce_buffer(reqp->bounce_sgl);
		reqp->bounce_sgl_count = 0;
	}

	if (reqp->retries > 0) {
		mtx_lock(&sc->hs_lock);
#if HVS_TIMEOUT_TEST
		xpt_print(ccb->ccb_h.path,
			"%u: IO returned after timeout, "
			"waking up timer handler if any.\n", ticks);
		mtx_lock(&reqp->event.mtx);
		cv_signal(&reqp->event.cv);
		mtx_unlock(&reqp->event.mtx);
#endif
		reqp->retries = 0;
		xpt_print(ccb->ccb_h.path,
			"%u: IO returned after timeout, "
			"stopping timer if any.\n", ticks);
		mtx_unlock(&sc->hs_lock);
	}

#ifdef notyet
	/*
	 * callout_drain() will wait for the timer handler to finish
	 * if it is running. So we don't need any lock to synchronize
	 * between this routine and the timer handler.
	 * Note that we need to make sure reqp is not freed while the
	 * timer handler is using it or about to use it.
	 */
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		callout_drain(&reqp->callout);
	}
#endif

	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	if (vm_srb->scsi_status == SCSI_STATUS_OK) {
		const struct scsi_generic *cmd;
-
		/*
		 * Check whether the data for INQUIRY cmd is valid or
		 * not.  Windows 10 and Windows 2016 send all zero
		 * inquiry data to VM even for unpopulated slots.
		 */
		cmd = (const struct scsi_generic *)
		    ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
		     csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes);
-		if (cmd->opcode == INQUIRY &&
-		    /*
-		     * XXX: Temporary work around disk hot plugin on win2k12r2,
-		     * only filtering the invalid disk on win10 or 2016 server.
-		     * So, the hot plugin on win10 and 2016 server needs
-		     * to be fixed.
+		if (cmd->opcode == INQUIRY) {
+		    /*
+		     * A Windows 10 or Windows 2016 host responds to the
+		     * INQUIRY for a nonexistent device with invalid data:
+		     *	[0x7f 0x0 0x5 0x2 0x1f ... ]
+		     * On Windows 2012 R2, however, the response is:
+		     *	[0x7f 0x0 0x0 0x0 0x0 ]
+		     * That is why the inquiry response is validated here.
+		     * The validation skips any INQUIRY whose response is
+		     * shorter than SHORT_INQUIRY_LENGTH (36) bytes.
+		     *
+		     * For more information about INQUIRY, please refer to:
+		     *  ftp://ftp.avc-pioneer.com/Mtfuji_7/Proposal/Jun09/INQUIRY.pdf
		     */
-		    vmstor_proto_version == VMSTOR_PROTOCOL_VERSION_WIN10 &&
-		    is_inquiry_valid(
-		    (const struct scsi_inquiry_data *)csio->data_ptr) == 0) {
+		    const struct scsi_inquiry_data *inq_data =
+			(const struct scsi_inquiry_data *)csio->data_ptr;
+		    uint8_t *resp_buf = (uint8_t *)csio->data_ptr;
+		    /* Get the response buffer length */
+		    int resp_buf_len = resp_buf[4] + 5;
+		    /* The request buffer length is the response buffer capacity */
+		    int req_buf_len = csio->dxfer_len;
+		    int data_len = (resp_buf_len < req_buf_len) ? resp_buf_len : req_buf_len;
+		    if (data_len < SHORT_INQUIRY_LENGTH) {
+			ccb->ccb_h.status |= CAM_REQ_CMP;
+			if (bootverbose) {
+				mtx_lock(&sc->hs_lock);
+				xpt_print(ccb->ccb_h.path,
+				    "storvsc skips the validation for short inquiry (%d)"
+				    " [%x %x %x %x %x]\n",
+				    data_len, resp_buf[0], resp_buf[1], resp_buf[2],
+				    resp_buf[3], resp_buf[4]);
+				mtx_unlock(&sc->hs_lock);
+			}
+		    } else if (is_inquiry_valid(inq_data) == 0) {
			ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
			if (bootverbose) {
				mtx_lock(&sc->hs_lock);
				xpt_print(ccb->ccb_h.path,
-				    "storvsc uninstalled device\n");
+				    "storvsc uninstalled invalid device"
+				    " [%x %x %x %x %x]\n",
+				    resp_buf[0], resp_buf[1], resp_buf[2], resp_buf[3], resp_buf[4]);
+				mtx_unlock(&sc->hs_lock);
+			}
+		    } else {
+			ccb->ccb_h.status |= CAM_REQ_CMP;
+			if (bootverbose) {
+				mtx_lock(&sc->hs_lock);
+				xpt_print(ccb->ccb_h.path,
+				    "storvsc has passed inquiry response (%d) validation\n",
+				    data_len);
				mtx_unlock(&sc->hs_lock);
			}
+		    }
		} else {
			ccb->ccb_h.status |= CAM_REQ_CMP;
		}
	} else {
		mtx_lock(&sc->hs_lock);
		xpt_print(ccb->ccb_h.path,
			"storvsc scsi_status = %d\n",
			vm_srb->scsi_status);
		mtx_unlock(&sc->hs_lock);
		ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
	}

	ccb->csio.scsi_status = (vm_srb->scsi_status & 0xFF);
	ccb->csio.resid = ccb->csio.dxfer_len - vm_srb->transfer_len;

	if (reqp->sense_info_len != 0) {
		csio->sense_resid = csio->sense_len - reqp->sense_info_len;
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
	}

	mtx_lock(&sc->hs_lock);
	if (reqp->softc->hs_frozen == 1) {
		xpt_print(ccb->ccb_h.path,
			"%u: storvsc unfreezing softc 0x%p.\n",
			ticks, reqp->softc);
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		reqp->softc->hs_frozen = 0;
	}
	storvsc_free_request(sc, reqp);
	mtx_unlock(&sc->hs_lock);

	xpt_done_direct(ccb);
}
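
The validation added above sizes the response from byte 4 of the INQUIRY
data, the ADDITIONAL LENGTH field, which counts the bytes following the
5-byte header, and clamps the result to the request buffer capacity. A
minimal sketch of that arithmetic; inquiry_data_len() is a hypothetical
helper, not part of the driver:

#include <stdint.h>
#include <stdio.h>

#define SHORT_INQUIRY_LENGTH	36	/* same threshold the driver uses */

/* Repeat the length math from storvsc_io_done(): total response is
 * ADDITIONAL LENGTH (byte 4) plus the 5-byte header, clamped to the
 * capacity of the request buffer. */
static int
inquiry_data_len(const uint8_t *resp_buf, int req_buf_len)
{
	int resp_buf_len = resp_buf[4] + 5;

	return (resp_buf_len < req_buf_len ? resp_buf_len : req_buf_len);
}

int
main(void)
{
	/* Windows 2012 R2 empty-slot response: [0x7f 0x0 0x0 0x0 0x0]. */
	uint8_t w2k12r2[5] = { 0x7f, 0x00, 0x00, 0x00, 0x00 };
	/* Windows 10/2016 style: additional length 0x1f, 36-byte total. */
	uint8_t win10[36] = { 0x7f, 0x00, 0x05, 0x02, 0x1f };

	printf("w2k12r2: %d bytes (< %d, validation skipped)\n",
	    inquiry_data_len(w2k12r2, 255), SHORT_INQUIRY_LENGTH);
	printf("win10:   %d bytes (>= %d, validated)\n",
	    inquiry_data_len(win10, 255), SHORT_INQUIRY_LENGTH);
	return (0);
}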

/**
 * @brief Free a request structure
 *
 * Free a request structure by returning it to the free list
 *
 * @param sc pointer to a softc
 * @param reqp pointer to a request structure
 */
static void
storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp)
{

	LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
}

/**
 * @brief Determine the type of storage device from the GUID
 *
 * Using the type GUID, determine whether this is a StorVSC (paravirtual
 * SCSI) or BlkVSC (paravirtual IDE) device.
 *
 * @param dev a device
 * @returns an enum hv_storage_type
 */
static enum hv_storage_type
storvsc_get_storage_type(device_t dev)
{
	const char *p = vmbus_get_type(dev);

	if (!memcmp(p, &gBlkVscDeviceType, sizeof(hv_guid))) {
		return DRIVER_BLKVSC;
	} else if (!memcmp(p, &gStorVscDeviceType, sizeof(hv_guid))) {
		return DRIVER_STORVSC;
	}
	return (DRIVER_UNKNOWN);
}
