Lines 38-43
 #include <sys/param.h>
 #include <sys/proc.h>
 #include <sys/condvar.h>
+#include <sys/time.h>
 #include <sys/systm.h>
 #include <sys/sockio.h>
 #include <sys/mbuf.h>
Lines 53-60
 #include <sys/callout.h>
 #include <vm/vm.h>
 #include <vm/pmap.h>
+#include <vm/uma.h>
 #include <sys/lock.h>
 #include <sys/sema.h>
+#include <sys/sglist.h>
+#include <machine/bus.h>
+#include <sys/bus_dma.h>
 
 #include <cam/cam.h>
 #include <cam/cam_ccb.h>
Lines 66-72
 #include <cam/scsi/scsi_all.h>
 #include <cam/scsi/scsi_message.h>
 
-
 #include <dev/hyperv/include/hyperv.h>
 #include "hv_vstorage.h"
 
Lines 77-84
 #define BLKVSC_MAX_IO_REQUESTS STORVSC_MAX_IO_REQUESTS
 #define STORVSC_MAX_TARGETS (2)
 
+#define STORVSC_WIN7_MAJOR 4
+#define STORVSC_WIN7_MINOR 2
+
+#define STORVSC_WIN8_MAJOR 5
+#define STORVSC_WIN8_MINOR 1
+
+#define HV_ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))
+
 struct storvsc_softc;
 
+struct hv_sgl_node {
+    LIST_ENTRY(hv_sgl_node) link;
+    struct sglist *sgl_data;
+};
+
+struct hv_sgl_page_pool{
+    LIST_HEAD(, hv_sgl_node) in_use_sgl_list;
+    LIST_HEAD(, hv_sgl_node) free_sgl_list;
+    boolean_t is_init;
+} g_hv_sgl_page_pool;
+
+#define STORVSC_MAX_SG_PAGE_CNT STORVSC_MAX_IO_REQUESTS * HV_MAX_MULTIPAGE_BUFFER_COUNT
+
 enum storvsc_request_type {
     WRITE_TYPE,
     READ_TYPE,
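
Note: the HV_ALIGN() macro added in this hunk rounds a value up to the next multiple of a power-of-two alignment. A small standalone check of that behavior (illustrative only, not part of the patch):

    #include <assert.h>

    #define HV_ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

    int main(void)
    {
        assert(HV_ALIGN(13, 8) == 16);     /* rounded up to the next 8-byte boundary */
        assert(HV_ALIGN(16, 8) == 16);     /* already aligned values are unchanged */
        assert(HV_ALIGN(1, 4096) == 4096); /* rounds up to a full page */
        return 0;
    }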
Lines 96-115
     struct storvsc_softc *softc;
     struct callout callout;
     struct sema synch_sema; /*Synchronize the request/response if needed */
+    struct sglist *bounce_sgl;
+    unsigned int bounce_sgl_count;
+    uint64_t not_aligned_seg_bits;
 };
 
 struct storvsc_softc {
     struct hv_device *hs_dev;
     LIST_HEAD(, hv_storvsc_request) hs_free_list;
     struct mtx hs_lock;
     struct storvsc_driver_props *hs_drv_props;
     int hs_unit;
     uint32_t hs_frozen;
     struct cam_sim *hs_sim;
     struct cam_path *hs_path;
     uint32_t hs_num_out_reqs;
     boolean_t hs_destroy;
     boolean_t hs_drain_notify;
+    boolean_t hs_open_multi_channel;
     struct sema hs_drain_sema;
     struct hv_storvsc_request hs_init_req;
     struct hv_storvsc_request hs_reset_req;
Lines 124-130
  * The first can be tested by "sg_senddiag -vv /dev/daX",
  * and the second and third can be done by
  * "sg_wr_mode -v -p 08 -c 0,1a -m 0,ff /dev/daX".
  */
 #define HVS_TIMEOUT_TEST 0
 
 /*
Lines 138-144
     char *drv_name;
     char *drv_desc;
     uint8_t drv_max_luns_per_target;
     uint8_t drv_max_ios_per_target;
     uint32_t drv_ringbuffer_size;
 };
 
Lines 150-155
 
 #define HS_MAX_ADAPTERS 10
 
+#define HV_STORAGE_SUPPORTS_MULTI_CHANNEL 0x1
+
 /* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
 static const hv_guid gStorVscDeviceType={
     .data = {0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
Lines 171-176
     STORVSC_RINGBUFFER_SIZE}
 };
 
+static int storvsc_current_major;
+static int storvsc_current_minor;
+
 /* static functions */
 static int storvsc_probe(device_t dev);
 static int storvsc_attach(device_t dev);
Lines 177-183
 static int storvsc_detach(device_t dev);
 static void storvsc_poll(struct cam_sim * sim);
 static void storvsc_action(struct cam_sim * sim, union ccb * ccb);
-static void create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp);
+static int create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp);
 static void storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp);
 static enum hv_storage_type storvsc_get_storage_type(device_t dev);
 static void hv_storvsc_on_channel_callback(void *context);
Lines 186-191
     struct hv_storvsc_request *request);
 static int hv_storvsc_connect_vsp(struct hv_device *device);
 static void storvsc_io_done(struct hv_storvsc_request *reqp);
+void storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl,
+    bus_dma_segment_t *orig_sgl,
+    unsigned int orig_sgl_count,
+    uint64_t seg_bits);
+void storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl,
+    unsigned int dest_sgl_count,
+    struct sglist* src_sgl,
+    uint64_t seg_bits);
 
 static device_method_t storvsc_methods[] = {
     /* Device interface */
Lines 207-213
 
 
 /**
  * The host is capable of sending messages to us that are
  * completely unsolicited. So, we need to address the race
  * condition where we may be in the process of unloading the
  * driver when the host may send us an unsolicited message.
Lines 223-229
  * destroyed.
  *
  * 3. Once the device is marked as being destroyed, we only
  * permit incoming traffic to properly account for
  * packets already sent out.
  */
 static inline struct storvsc_softc *
Lines 260-265
 }
 
 /**
+ * @brief Callback handler, will be invoked when receive mutil-channel offer
+ *
+ * @param context new multi-channel
+ */
+static void
+storvsc_handle_sc_creation(void *context)
+{
+    hv_vmbus_channel *new_channel = NULL;
+    struct hv_device *device = NULL;
+    struct storvsc_softc *sc = NULL;
+    struct vmstor_chan_props props;
+    int ret = 0;
+
+    new_channel = (hv_vmbus_channel *)context;
+    device = new_channel->primary_channel->device;
+    sc = get_stor_device(device, TRUE);
+    if (NULL == sc){
+        return;
+    }
+
+    if (FALSE == sc->hs_open_multi_channel){
+        return;
+    }
+
+    memset(&props, 0, sizeof(struct vmstor_chan_props));
+
+    ret = hv_vmbus_channel_open(new_channel,
+        sc->hs_drv_props->drv_ringbuffer_size,
+        sc->hs_drv_props->drv_ringbuffer_size,
+        (void *)&props,
+        sizeof(struct vmstor_chan_props),
+        hv_storvsc_on_channel_callback,
+        new_channel);
+
+    return;
+}
+
+/**
+ * @brief Send multi-channel creation request to host
+ *
+ * @param device a Hyper-V device pointer
+ * @param max_chans the max channels supported by vmbus
+ */
+static void
+storvsc_send_multichannel_request(struct hv_device *dev, int max_chans)
+{
+    struct storvsc_softc *sc = NULL;
+    struct hv_storvsc_request *request = NULL;
+    struct vstor_packet *vstor_packet = NULL;
+    int request_channels_cnt = 0;
+    int ret;
+
+    /* get multichannels count that need to create */
+    request_channels_cnt = ((max_chans > mp_ncpus) ? mp_ncpus : max_chans);
+
+    sc = get_stor_device(dev, TRUE);
+    if (sc == NULL) {
+        printf("Storvsc_error: get sc failed while send mutilchannel "
+            "request\n");
+        return;
+    }
+
+    request = &sc->hs_init_req;
+
+    /* Establish a handler for multi-channel */
+    dev->channel->sc_creation_callback = storvsc_handle_sc_creation;
+
+    /* request the host to create multi-channel */
+    memset(request, 0, sizeof(struct hv_storvsc_request));
+
+    sema_init(&request->synch_sema, 0, ("stor_synch_sema"));
+
+    vstor_packet = &request->vstor_packet;
+
+    vstor_packet->operation = VSTOR_OPERATION_CREATE_MULTI_CHANNELS;
+    vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+    vstor_packet->u.multi_channels_cnt = request_channels_cnt;
+
+    ret = hv_vmbus_channel_send_packet(
+        dev->channel,
+        vstor_packet,
+        sizeof(struct vstor_packet),
+        (uint64_t)(uintptr_t)request,
+        HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
+        HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+
+    /* wait for 500 ticks */
+    ret = sema_timedwait(&request->synch_sema, 500);
+    if (ret != 0) {
+        printf("Storvsc_error: create multi-channel timeout, %d\n",
+            ret);
+        return;
+    }
+
+    if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
+        vstor_packet->status != 0) {
+        printf("Storvsc_error: create multi-channel invalid operation "
+            "(%d) or statue (%u)\n",
+            vstor_packet->operation, vstor_packet->status);
+        return;
+    }
+
+    sc->hs_open_multi_channel = TRUE;
+
+    printf("Storvsc create multi-channel success!\n");
+}
+
+/**
  * @brief initialize channel connection to parent partition
  *
  * @param dev a Hyper-V device pointer
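
Note: storvsc_send_multichannel_request() above reuses the driver's synchronous request pattern: a semaphore initialized to 0, a packet sent with REQUEST_COMPLETION_FLAG, a timed wait, and a sema_post() issued from the channel callback when the completion arrives. A userspace analogue of that handshake (illustrative only; POSIX semaphores stand in for the kernel sema(9) API, and the thread stands in for the channel callback):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>
    #include <time.h>

    static sem_t synch_sema;

    static void *completion_path(void *arg)
    {
        /* plays the role of the channel callback posting the semaphore */
        sem_post(&synch_sema);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        struct timespec ts;

        sem_init(&synch_sema, 0, 0);            /* start unavailable */
        pthread_create(&t, NULL, completion_path, NULL);

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += 1;                         /* stands in for the 500-tick wait */
        if (sem_timedwait(&synch_sema, &ts) != 0)
            printf("request timed out\n");
        else
            printf("request completed\n");

        pthread_join(t, NULL);
        sem_destroy(&synch_sema);
        return 0;
    }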
Lines 272-277
     struct hv_storvsc_request *request;
     struct vstor_packet *vstor_packet;
     struct storvsc_softc *sc;
+    uint16_t max_chans = 0;
+    boolean_t is_support_multichannel = FALSE;
 
     sc = get_stor_device(dev, TRUE);
     if (sc == NULL) {
Lines 304-310
         goto cleanup;
     }
 
-    ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
+    /* wait 500 ticks */
+    ret = sema_timedwait(&request->synch_sema, 500);
 
     if (ret != 0) {
         goto cleanup;
Lines 321-327
     vstor_packet->operation = VSTOR_OPERATION_QUERYPROTOCOLVERSION;
     vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 
-    vstor_packet->u.version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
+    vstor_packet->u.version.major_minor =
+        VMSTOR_PROTOCOL_VERSION(storvsc_current_major, storvsc_current_minor);
 
     /* revision is only significant for Windows guests */
     vstor_packet->u.version.revision = 0;
Lines 338-344
         goto cleanup;
     }
 
-    ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
+    /* wait 500 ticks */
+    ret = sema_timedwait(&request->synch_sema, 500);
 
     if (ret) {
         goto cleanup;
Lines 369-375
         goto cleanup;
     }
 
-    ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
+    /* wait 500 ticks */
+    ret = sema_timedwait(&request->synch_sema, 500);
 
     if (ret != 0) {
         goto cleanup;
Lines 377-386
 
     /* TODO: Check returned version */
     if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
         vstor_packet->status != 0) {
         goto cleanup;
     }
 
+    /* multi-channels feature is supported by WIN8 and above version */
+    max_chans = vstor_packet->u.chan_props.max_channel_cnt;
+    if ((hv_vmbus_protocal_version != HV_VMBUS_VERSION_WIN7) &&
+        (hv_vmbus_protocal_version != HV_VMBUS_VERSION_WS2008)) {
+        if (vstor_packet->u.chan_props.flags &
+            HV_STORAGE_SUPPORTS_MULTI_CHANNEL) {
+            is_support_multichannel = TRUE;
+        }
+    }
+
     memset(vstor_packet, 0, sizeof(struct vstor_packet));
     vstor_packet->operation = VSTOR_OPERATION_ENDINITIALIZATION;
     vstor_packet->flags = REQUEST_COMPLETION_FLAG;
Lines 397-403
         goto cleanup;
     }
 
-    ret = sema_timedwait(&request->synch_sema, 500); /* KYS 5 seconds */
+    /* wait 500 ticks */
+    ret = sema_timedwait(&request->synch_sema, 500);
 
     if (ret != 0) {
         goto cleanup;
Lines 404-413
     }
 
     if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
         vstor_packet->status != 0) {
         goto cleanup;
     }
 
+    /*
+     * If multi-channel is supported, send multichannel create
+     * request to host.
+     */
+    if (is_support_multichannel){
+        storvsc_send_multichannel_request(dev, max_chans);
+    }
+
 cleanup:
     sema_destroy(&request->synch_sema);
     return (ret);
Lines 443-451
         (void *)&props,
         sizeof(struct vmstor_chan_props),
         hv_storvsc_on_channel_callback,
-        dev);
+        dev->channel);
 
-
     if (ret != 0) {
         return ret;
     }
Lines 498-504
 
 
     /*
      * At this point, all outstanding requests in the adapter
      * should have been flushed out and return to us
      */
 
Lines 521-526
 {
     struct storvsc_softc *sc;
     struct vstor_packet *vstor_packet = &request->vstor_packet;
+    struct hv_vmbus_channel* outgoing_channel = NULL;
     int ret = 0;
 
     sc = get_stor_device(device, TRUE);
Lines 539-557
 
     vstor_packet->operation = VSTOR_OPERATION_EXECUTESRB;
 
+    outgoing_channel = vmbus_select_outgoing_channel(device->channel);
 
     mtx_unlock(&request->softc->hs_lock);
     if (request->data_buf.length) {
         ret = hv_vmbus_channel_send_packet_multipagebuffer(
-            device->channel,
+            outgoing_channel,
             &request->data_buf,
             vstor_packet,
             sizeof(struct vstor_packet),
             (uint64_t)(uintptr_t)request);
 
     } else {
         ret = hv_vmbus_channel_send_packet(
-            device->channel,
+            outgoing_channel,
             vstor_packet,
             sizeof(struct vstor_packet),
             (uint64_t)(uintptr_t)request,
Lines 610-616
 hv_storvsc_on_channel_callback(void *context)
 {
     int ret = 0;
-    struct hv_device *device = (struct hv_device *)context;
+    hv_vmbus_channel *channel = (hv_vmbus_channel *)context;
+    struct hv_device *device = NULL;
     struct storvsc_softc *sc;
     uint32_t bytes_recvd;
     uint64_t request_id;
Lines 618-632
     struct hv_storvsc_request *request;
     struct vstor_packet *vstor_packet;
 
+    if (channel->primary_channel != NULL){
+        device = channel->primary_channel->device;
+    } else {
+        device = channel->device;
+    }
+
+    KASSERT(device, ("device"));
+
     sc = get_stor_device(device, FALSE);
     if (sc == NULL) {
+        printf("Storvsc_error: get stor device failed.\n");
         return;
     }
 
-    KASSERT(device, ("device"));
-
     ret = hv_vmbus_channel_recv_packet(
-        device->channel,
+        channel,
         packet,
         roundup2(sizeof(struct vstor_packet), 8),
         &bytes_recvd,
Lines 634-654
 
     while ((ret == 0) && (bytes_recvd > 0)) {
         request = (struct hv_storvsc_request *)(uintptr_t)request_id;
-        KASSERT(request, ("request"));
 
         if ((request == &sc->hs_init_req) ||
             (request == &sc->hs_reset_req)) {
             memcpy(&request->vstor_packet, packet,
                 sizeof(struct vstor_packet));
             sema_post(&request->synch_sema);
         } else {
             vstor_packet = (struct vstor_packet *)packet;
             switch(vstor_packet->operation) {
             case VSTOR_OPERATION_COMPLETEIO:
+                if (request == NULL) {
+                    printf("VMBUS: storvsc received a "
+                        "packet with NULL request id in "
+                        "COMPLETEIO operation. Panick!\n");
+                    KASSERT(request, ("request"));
+                }
                 hv_storvsc_on_iocompletion(sc,
                     vstor_packet, request);
                 break;
             case VSTOR_OPERATION_REMOVEDEVICE:
+            case VSTOR_OPERATION_ENUMERATE_BUS:
+                printf("VMBUS: storvsc operation %d not "
+                    "implemented.\n", vstor_packet->operation);
                 /* TODO: implement */
                 break;
             default:
Lines 656-662
             }
         }
         ret = hv_vmbus_channel_recv_packet(
-            device->channel,
+            channel,
             packet,
             roundup2(sizeof(struct vstor_packet), 8),
             &bytes_recvd,
Lines 680-686
 {
     int ata_disk_enable = 0;
     int ret = ENXIO;
 
+    if ((HV_VMBUS_VERSION_WIN8 == hv_vmbus_protocal_version) ||
+        (HV_VMBUS_VERSION_WIN8_1 == hv_vmbus_protocal_version)){
+        storvsc_current_major = STORVSC_WIN8_MAJOR;
+        storvsc_current_minor = STORVSC_WIN8_MINOR;
+    } else {
+        storvsc_current_major = STORVSC_WIN7_MAJOR;
+        storvsc_current_minor = STORVSC_WIN7_MINOR;
+    }
+
     switch (storvsc_get_storage_type(dev)) {
     case DRIVER_BLKVSC:
         if(bootverbose)
Lines 721-729
     enum hv_storage_type stor_type;
     struct storvsc_softc *sc;
     struct cam_devq *devq;
-    int ret, i;
+    int ret, i, j;
     struct hv_storvsc_request *reqp;
     struct root_hold_token *root_mount_token = NULL;
+    struct hv_sgl_node *sgl_node = NULL;
+    void *tmp_buff = NULL;
 
     /*
      * We need to serialize storvsc attach calls.
Lines 764-771
         LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
     }
 
+    /* create sg-list page pool */
+    if (FALSE == g_hv_sgl_page_pool.is_init){
+        g_hv_sgl_page_pool.is_init = TRUE;
+        LIST_INIT(&g_hv_sgl_page_pool.in_use_sgl_list);
+        LIST_INIT(&g_hv_sgl_page_pool.free_sgl_list);
+
+        /* pre-create SG list, each SG list with HV_MAX_MULTIPAGE_BUFFER_COUNT segments, each segment has one page buffer */
+        for (i = 0; i < STORVSC_MAX_IO_REQUESTS; i++){
+            sgl_node = malloc(sizeof(struct hv_sgl_node),
+                M_DEVBUF, M_WAITOK|M_ZERO);
+            if (NULL == sgl_node){
+                ret = ENOMEM;
+                goto cleanup;
+            }
+
+            sgl_node->sgl_data = sglist_alloc(HV_MAX_MULTIPAGE_BUFFER_COUNT,
+                M_WAITOK|M_ZERO);
+            if (NULL == sgl_node->sgl_data){
+                ret = ENOMEM;
+                goto cleanup;
+            }
+
+            for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++){
+                tmp_buff = malloc(PAGE_SIZE,
+                    M_DEVBUF, M_WAITOK|M_ZERO);
+                if (NULL == tmp_buff){
+                    ret = ENOMEM;
+                    goto cleanup;
+                }
+
+                sgl_node->sgl_data->sg_segs[j].ss_paddr = (vm_paddr_t)tmp_buff;
+            }
+
+            LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list, sgl_node, link);
+        }
+    }
+
     sc->hs_destroy = FALSE;
     sc->hs_drain_notify = FALSE;
+    sc->hs_open_multi_channel = FALSE;
     sema_init(&sc->hs_drain_sema, 0, "Store Drain Sema");
 
     ret = hv_storvsc_connect_vsp(hv_dev);
Lines 834-839
         LIST_REMOVE(reqp, link);
         free(reqp, M_DEVBUF);
     }
+
+    while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
+        sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
+        LIST_REMOVE(sgl_node, link);
+        for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++){
+            if (NULL != (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr){
+                free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF);
+            }
+        }
+        sglist_free(sgl_node->sgl_data);
+        free(sgl_node, M_DEVBUF);
+    }
+
     return (ret);
 }
 
Lines 853-858
     struct storvsc_softc *sc = device_get_softc(dev);
     struct hv_storvsc_request *reqp = NULL;
     struct hv_device *hv_device = vmbus_get_devctx(dev);
+    struct hv_sgl_node *sgl_node = NULL;
+    int j = 0;
 
     mtx_lock(&hv_device->channel->inbound_lock);
     sc->hs_destroy = TRUE;
Lines 884-889
         free(reqp, M_DEVBUF);
     }
     mtx_unlock(&sc->hs_lock);
+
+    while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
+        sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
+        LIST_REMOVE(sgl_node, link);
+        for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++){
+            if (NULL != (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr){
+                free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF);
+            }
+        }
+        sglist_free(sgl_node->sgl_data);
+        free(sgl_node, M_DEVBUF);
+    }
+
     return (0);
 }
 
Lines 939-945
         ticks, __func__, (ret == 0)?
         "IO return detected" :
         "IO return not detected");
     /*
      * Now both the timer handler and io done are running
      * simultaneously. We want to confirm the io done always
      * finishes after the timer handler exits. So reqp used by
Lines 1024-1030
 
     mtx_assert(&sc->hs_lock, MA_OWNED);
     mtx_unlock(&sc->hs_lock);
-    hv_storvsc_on_channel_callback(sc->hs_dev);
+    hv_storvsc_on_channel_callback(sc->hs_dev->channel);
     mtx_lock(&sc->hs_lock);
 }
 
Lines 1152-1161
 
     bzero(reqp, sizeof(struct hv_storvsc_request));
     reqp->softc = sc;
+
+    ccb->ccb_h.status |= CAM_SIM_QUEUED;
+    if ((res = create_storvsc_request(ccb, reqp)) != 0) {
+        ccb->ccb_h.status = CAM_REQ_INVALID;
+        xpt_done(ccb);
+        return;
+    }
 
-    ccb->ccb_h.status |= CAM_SIM_QUEUED;
-    create_storvsc_request(ccb, reqp);
-
     if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
         callout_init(&reqp->callout, CALLOUT_MPSAFE);
         callout_reset(&reqp->callout,
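
Note: the next hunks add the bounce-buffer helpers. The core bookkeeping in storvsc_check_bounce_buffer_sgl() is to record, in a 64-bit mask, which segments of an S/G list do not start on a page boundary. A standalone sketch of that idea (illustrative only; the real function works on physical addresses via vtophys() and also detects gaps between consecutive segments):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (PAGE_SIZE - 1)

    static uint64_t
    mark_unaligned(const uintptr_t *seg_addr, unsigned int seg_count)
    {
        uint64_t bits = 0;
        unsigned int i;

        for (i = 0; i < seg_count; i++) {
            if (seg_addr[i] & PAGE_MASK)   /* does not start on a page boundary */
                bits |= 1ULL << i;
        }
        return (bits);
    }

    int main(void)
    {
        uintptr_t segs[3] = { 0x10000, 0x14200, 0x18000 };

        /* only the middle segment starts mid-page */
        assert(mark_unaligned(segs, 3) == 0x2);
        printf("unaligned bitmap: 0x%llx\n",
            (unsigned long long)mark_unaligned(segs, 3));
        return 0;
    }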
Lines 1195-1200
 }
 
 /**
+ * @brief destroy bounce buffer
+ *
+ * This function is responsible for destroy a Scatter/Gather list
+ * that create by storvsc_create_bounce_buffer()
+ *
+ * @param sgl- the Scatter/Gather need be destroy
+ * @param sg_count- page count of the SG list.
+ *
+ */
+static void
+storvsc_destroy_bounce_buffer(struct sglist *sgl)
+{
+    struct hv_sgl_node *sgl_node = NULL;
+
+    sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.in_use_sgl_list);
+    LIST_REMOVE(sgl_node, link);
+    if (NULL == sgl_node) {
+        printf("storvsc error: not enough in use sgl\n");
+        return;
+    }
+    sgl_node->sgl_data = sgl;
+    LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list, sgl_node, link);
+}
+
+/**
+ * @brief create bounce buffer
+ *
+ * This function is responsible for create a Scatter/Gather list,
+ * which hold several pages that can be aligned with page size.
+ *
+ * @param seg_count- SG-list segments count
+ * @param write - if WRITE_TYPE, set SG list page used size to 0,
+ * otherwise set used size to page size.
+ *
+ * return NULL if create failed
+ */
+static struct sglist *
+storvsc_create_bounce_buffer(uint16_t seg_count, int write)
+{
+    int i = 0;
+    struct sglist *bounce_sgl = NULL;
+    unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
+    struct hv_sgl_node *sgl_node = NULL;
+
+    /* get struct sglist from free_sgl_list */
+    sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
+    LIST_REMOVE(sgl_node, link);
+    if (NULL == sgl_node) {
+        printf("storvsc error: not enough free sgl\n");
+        return NULL;
+    }
+    bounce_sgl = sgl_node->sgl_data;
+    LIST_INSERT_HEAD(&g_hv_sgl_page_pool.in_use_sgl_list, sgl_node, link);
+
+    bounce_sgl->sg_maxseg = seg_count;
+    if (write == WRITE_TYPE) {
+        bounce_sgl->sg_nseg = 0;
+    } else {
+        bounce_sgl->sg_nseg = seg_count;
+    }
+
+    for (i = 0; i < seg_count; i++) {
+        bounce_sgl->sg_segs[i].ss_len = buf_len;
+    }
+
+    return bounce_sgl;
+}
+
+/**
+ * @brief copy data from SG list to bounce buffer
+ *
+ * This function is responsible for copy data from one SG list's segments
+ * to another SG list which used as bounce buffer.
+ *
+ * @param bounce_sgl - the destination SG list
+ * @param orig_sgl - the segment of the source SG list.
+ * @param orig_sgl_count - the count of segments.
+ * @param orig_sgl_count - indicate which segment need bounce buffer, set 1 means need.
+ *
+ */
+void storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl,
+    bus_dma_segment_t *orig_sgl,
+    unsigned int orig_sgl_count,
+    uint64_t seg_bits)
+{
+    int src_sgl_idx = 0;
+
+    for (src_sgl_idx = 0; src_sgl_idx < orig_sgl_count; src_sgl_idx++) {
+        if (seg_bits & (1 << src_sgl_idx)) {
+            memcpy((void*)bounce_sgl->sg_segs[src_sgl_idx].ss_paddr,
+                (void*)orig_sgl[src_sgl_idx].ds_addr,
+                orig_sgl[src_sgl_idx].ds_len);
+            bounce_sgl->sg_segs[src_sgl_idx].ss_len =
+                orig_sgl[src_sgl_idx].ds_len;
+        }
+    }
+}
+
+/**
+ * @brief copy data from SG list which used as bounce to another SG list
+ *
+ * This function is responsible for copy data from one SG list with bounce
+ * buffer to another SG list's segments.
+ *
+ * @param dest_sgl - the destination SG list's segments
+ * @param dest_sgl_count - the count of destination SG list's segment.
+ * @param src_sgl - the source SG list.
+ * @param seg_bits - indicate which segment used bounce buffer of src SG-list.
+ *
+ */
+void
+storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl,
+    unsigned int dest_sgl_count,
+    struct sglist* src_sgl,
+    uint64_t seg_bits)
+{
+    int sgl_idx = 0;
+
+    for (sgl_idx = 0; sgl_idx < dest_sgl_count; sgl_idx++) {
+        if (seg_bits & (1 << sgl_idx)) {
+            memcpy((void*)(dest_sgl[sgl_idx].ds_addr),
+                (void*)(src_sgl->sg_segs[sgl_idx].ss_paddr),
+                src_sgl->sg_segs[sgl_idx].ss_len);
+        }
+    }
+}
+
+/**
+ * @brief check SG list with bounce buffer or not
+ *
+ * This function is responsible for check if need bounce buffer for SG list.
+ *
+ * @param sgl - the SG list's segments
+ * @param sg_count - the count of SG list's segment.
+ * @param bits - segmengs number that need bounce buffer
+ *
+ * return -1 if SG list needless bounce buffer
+ */
+static int
+storvsc_check_bounce_buffer_sgl(bus_dma_segment_t *sgl, unsigned int sg_count, uint64_t *bits)
+{
+    int i = 0;
+    int offset = 0;
+    uint64_t phys_addr = 0;
+    uint64_t tmp_bits = 0;
+    boolean_t found_hole = FALSE;
+    boolean_t pre_aligned = TRUE;
+
+    if (sg_count < 2){
+        return -1;
+    }
+
+    *bits = 0;
+
+    phys_addr = vtophys(sgl[0].ds_addr);
+    offset = phys_addr - trunc_page(phys_addr);
+    if (offset){
+        pre_aligned = FALSE;
+        tmp_bits |= 1;
+    }
+
+    for (i = 1; i < sg_count; i++) {
+        phys_addr = vtophys(sgl[i].ds_addr);
+        offset = phys_addr - trunc_page(phys_addr);
+
+        if (0 == offset) {
+            if (FALSE == pre_aligned){
+                /*
+                 * This segment is aligned, if the previous
+                 * one is not aligned, find a hole
+                 */
+                found_hole = TRUE;
+            }
+            pre_aligned = TRUE;
+        } else {
+            tmp_bits |= 1 << i;
+            if (FALSE == pre_aligned) {
+                if (phys_addr != vtophys(sgl[i-1].ds_addr +
+                    sgl[i-1].ds_len)) {
+                    /*
+                     * Check whether connect to previous
+                     * segment,if not, find the hole
+                     */
+                    found_hole = TRUE;
+                }
+            } else {
+                found_hole = TRUE;
+            }
+            pre_aligned = FALSE;
+        }
+    }
+
+    if (FALSE == found_hole) {
+        return -1;
+    } else {
+        *bits = tmp_bits;
+        return 0;
+    }
+}
+
+/**
  * @brief Fill in a request structure based on a CAM control block
  *
  * Fills in a request structure based on the contents of a CAM control
Lines 1204-1210
  * @param ccb pointer to a CAM contorl block
  * @param reqp pointer to a request structure
  */
-static void
+static int
 create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp)
 {
     struct ccb_scsiio *csio = &ccb->csio;
Link Here
|
1212 |
uint32_t bytes_to_copy = 0; |
1686 |
uint32_t bytes_to_copy = 0; |
1213 |
uint32_t pfn_num = 0; |
1687 |
uint32_t pfn_num = 0; |
1214 |
uint32_t pfn; |
1688 |
uint32_t pfn; |
|
|
1689 |
uint64_t not_aligned_seg_bits = 0; |
1215 |
|
1690 |
|
1216 |
/* refer to struct vmscsi_req for meanings of these two fields */ |
1691 |
/* refer to struct vmscsi_req for meanings of these two fields */ |
1217 |
reqp->vstor_packet.u.vm_srb.port = |
1692 |
reqp->vstor_packet.u.vm_srb.port = |
Lines 1232-1249
Link Here
|
1232 |
} |
1707 |
} |
1233 |
|
1708 |
|
1234 |
switch (ccb->ccb_h.flags & CAM_DIR_MASK) { |
1709 |
switch (ccb->ccb_h.flags & CAM_DIR_MASK) { |
1235 |
case CAM_DIR_OUT: |
1710 |
case CAM_DIR_OUT: |
1236 |
reqp->vstor_packet.u.vm_srb.data_in = WRITE_TYPE; |
1711 |
reqp->vstor_packet.u.vm_srb.data_in = WRITE_TYPE; |
1237 |
break; |
1712 |
break; |
1238 |
case CAM_DIR_IN: |
1713 |
case CAM_DIR_IN: |
1239 |
reqp->vstor_packet.u.vm_srb.data_in = READ_TYPE; |
1714 |
reqp->vstor_packet.u.vm_srb.data_in = READ_TYPE; |
1240 |
break; |
1715 |
break; |
1241 |
case CAM_DIR_NONE: |
1716 |
case CAM_DIR_NONE: |
1242 |
reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE; |
1717 |
reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE; |
1243 |
break; |
1718 |
break; |
1244 |
default: |
1719 |
default: |
1245 |
reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE; |
1720 |
reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE; |
1246 |
break; |
1721 |
break; |
1247 |
} |
1722 |
} |
1248 |
|
1723 |
|
1249 |
reqp->sense_data = &csio->sense_data; |
1724 |
reqp->sense_data = &csio->sense_data; |
Lines 1250-1279
Link Here
|
1250 |
reqp->sense_info_len = csio->sense_len; |
1725 |
reqp->sense_info_len = csio->sense_len; |
1251 |
|
1726 |
|
1252 |
reqp->ccb = ccb; |
1727 |
reqp->ccb = ccb; |
1253 |
/* |
1728 |
|
1254 |
KASSERT((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0, |
1729 |
if (0 == csio->dxfer_len) { |
1255 |
("ccb is scatter gather valid\n")); |
1730 |
return 0; |
1256 |
*/ |
1731 |
} |
1257 |
if (csio->dxfer_len != 0) { |
1732 |
|
1258 |
reqp->data_buf.length = csio->dxfer_len; |
1733 |
reqp->data_buf.length = csio->dxfer_len; |
|
|
1734 |
|
1735 |
switch (ccb->ccb_h.flags & CAM_DATA_MASK) { |
1736 |
case CAM_DATA_VADDR:{ |
1259 |
bytes_to_copy = csio->dxfer_len; |
1737 |
bytes_to_copy = csio->dxfer_len; |
1260 |
phys_addr = vtophys(csio->data_ptr); |
1738 |
phys_addr = vtophys(csio->data_ptr); |
1261 |
reqp->data_buf.offset = phys_addr - trunc_page(phys_addr); |
1739 |
reqp->data_buf.offset = phys_addr & PAGE_MASK; |
|
|
1740 |
|
1741 |
while (bytes_to_copy != 0) { |
1742 |
int bytes, page_offset; |
1743 |
phys_addr = |
1744 |
vtophys(&csio->data_ptr[reqp->data_buf.length - |
1745 |
bytes_to_copy]); |
1746 |
pfn = phys_addr >> PAGE_SHIFT; |
1747 |
reqp->data_buf.pfn_array[pfn_num] = pfn; |
1748 |
page_offset = phys_addr & PAGE_MASK; |
1749 |
|
1750 |
bytes = min(PAGE_SIZE - page_offset, bytes_to_copy); |
1751 |
|
1752 |
bytes_to_copy -= bytes; |
1753 |
pfn_num++; |
1754 |
} |
1755 |
break; |
1262 |
} |
1756 |
} |
|
|
1757 |
case CAM_DATA_SG:{ |
1758 |
int i = 0; |
1759 |
int offset = 0; |
1760 |
bus_dma_segment_t *storvsc_sglist = |
1761 |
(bus_dma_segment_t *)ccb->csio.data_ptr; |
1762 |
u_int16_t storvsc_sg_count = ccb->csio.sglist_cnt; |
1263 |
|
1763 |
|
1264 |
while (bytes_to_copy != 0) { |
1764 |
printf("Storvsc: get SG I/O operation, %d\n", |
1265 |
int bytes, page_offset; |
1765 |
reqp->vstor_packet.u.vm_srb.data_in); |
1266 |
phys_addr = vtophys(&csio->data_ptr[reqp->data_buf.length - |
|
|
1267 |
bytes_to_copy]); |
1268 |
pfn = phys_addr >> PAGE_SHIFT; |
1269 |
reqp->data_buf.pfn_array[pfn_num] = pfn; |
1270 |
page_offset = phys_addr - trunc_page(phys_addr); |
1271 |
|
1766 |
|
1272 |
bytes = min(PAGE_SIZE - page_offset, bytes_to_copy); |
1767 |
if (storvsc_sg_count > HV_MAX_MULTIPAGE_BUFFER_COUNT){ |
|
|
1768 |
printf("Storvsc: %d segments is too much, " |
1769 |
"only support %d segments\n", |
1770 |
storvsc_sg_count, HV_MAX_MULTIPAGE_BUFFER_COUNT); |
1771 |
return EINVAL; |
1772 |
} |
1273 |
|
1773 |
|
1274 |
bytes_to_copy -= bytes; |
1774 |
/* check if we need to create bounce buffer */ |
1275 |
pfn_num++; |
1775 |
if (storvsc_check_bounce_buffer_sgl( |
|
|
1776 |
storvsc_sglist, |
1777 |
storvsc_sg_count, |
1778 |
¬_aligned_seg_bits) != -1) { |
1779 |
reqp->bounce_sgl = |
1780 |
storvsc_create_bounce_buffer(storvsc_sg_count, |
1781 |
reqp->vstor_packet.u.vm_srb.data_in); |
1782 |
if (NULL == reqp->bounce_sgl) { |
1783 |
printf("Storvsc_error: create bounce buffer failed.\n"); |
1784 |
return ENOMEM; |
1785 |
} |
1786 |
|
1787 |
reqp->bounce_sgl_count = storvsc_sg_count; |
1788 |
reqp->not_aligned_seg_bits = not_aligned_seg_bits; |
1789 |
|
1790 |
/* |
1791 |
* if it is write, we need copy the original data |
1792 |
*to bounce buffer |
1793 |
*/ |
1794 |
if (WRITE_TYPE == reqp->vstor_packet.u.vm_srb.data_in) { |
1795 |
storvsc_copy_sgl_to_bounce_buf( |
1796 |
reqp->bounce_sgl, |
1797 |
storvsc_sglist, |
1798 |
storvsc_sg_count, |
1799 |
reqp->not_aligned_seg_bits); |
1800 |
} |
1801 |
|
1802 |
/* transfer virtual address to physical frame number */ |
1803 |
if (reqp->not_aligned_seg_bits & 0x1){ |
1804 |
phys_addr = |
1805 |
vtophys(reqp->bounce_sgl->sg_segs[0].ss_paddr); |
1806 |
}else{ |
1807 |
phys_addr = |
1808 |
vtophys(storvsc_sglist[0].ds_addr); |
1809 |
} |
1810 |
reqp->data_buf.offset = phys_addr & PAGE_MASK; |
1811 |
|
1812 |
pfn = phys_addr >> PAGE_SHIFT; |
1813 |
reqp->data_buf.pfn_array[0] = pfn; |
1814 |
|
1815 |
for (i = 1; i < storvsc_sg_count; i++) { |
1816 |
if (reqp->not_aligned_seg_bits & (1 << i)){ |
1817 |
phys_addr = |
1818 |
vtophys(reqp->bounce_sgl->sg_segs[i].ss_paddr); |
1819 |
} |
1820 |
else{ |
1821 |
phys_addr = |
1822 |
vtophys(storvsc_sglist[i].ds_addr); |
1823 |
} |
1824 |
|
1825 |
pfn = phys_addr >> PAGE_SHIFT; |
1826 |
reqp->data_buf.pfn_array[i] = pfn; |
1827 |
} |
1828 |
} |
1829 |
else { |
1830 |
phys_addr = vtophys(storvsc_sglist[0].ds_addr); |
1831 |
|
1832 |
reqp->data_buf.offset = phys_addr & PAGE_MASK; |
1833 |
|
1834 |
for (i = 0; i < storvsc_sg_count; i++){ |
1835 |
phys_addr = vtophys(storvsc_sglist[i].ds_addr); |
1836 |
pfn = phys_addr >> PAGE_SHIFT; |
1837 |
reqp->data_buf.pfn_array[i] = pfn; |
1838 |
} |
1839 |
|
1840 |
/* check the last segment cross boundary or not */ |
1841 |
offset = phys_addr & PAGE_MASK; |
1842 |
if (offset){ |
1843 |
phys_addr = |
1844 |
vtophys(storvsc_sglist[i-1].ds_addr + |
1845 |
PAGE_SIZE - offset); |
1846 |
pfn = phys_addr >> PAGE_SHIFT; |
1847 |
reqp->data_buf.pfn_array[i] = pfn; |
1848 |
} |
1849 |
|
1850 |
reqp->bounce_sgl_count = 0; |
1851 |
} |
1852 |
break; |
1276 |
} |
1853 |
} |
|
|
1854 |
default: |
1855 |
printf("Unknow flags: %d\n", ccb->ccb_h.flags); |
1856 |
return EINVAL; |
1857 |
} |
1858 |
|
1859 |
return 0; |
1277 |
} |
1860 |
} |
1278 |
|
1861 |
|
1279 |
/** |
1862 |
/** |
Lines 1292-1298
Link Here
|
1292 |
struct ccb_scsiio *csio = &ccb->csio; |
1875 |
struct ccb_scsiio *csio = &ccb->csio; |
1293 |
struct storvsc_softc *sc = reqp->softc; |
1876 |
struct storvsc_softc *sc = reqp->softc; |
1294 |
struct vmscsi_req *vm_srb = &reqp->vstor_packet.u.vm_srb; |
1877 |
struct vmscsi_req *vm_srb = &reqp->vstor_packet.u.vm_srb; |
1295 |
|
1878 |
bus_dma_segment_t *ori_sglist = NULL; |
|
|
1879 |
int ori_sg_count = 0; |
1880 |
|
1881 |
/* destroy bounce buffer if it is used */ |
1882 |
if (reqp->bounce_sgl_count) { |
1883 |
ori_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr; |
1884 |
ori_sg_count = ccb->csio.sglist_cnt; |
1885 |
|
1886 |
/* |
1887 |
* If it is READ operation, we should copy back the data |
1888 |
* to original SG list. |
1889 |
*/ |
1890 |
if (READ_TYPE == reqp->vstor_packet.u.vm_srb.data_in) { |
1891 |
storvsc_copy_from_bounce_buf_to_sgl(ori_sglist, |
1892 |
ori_sg_count, |
1893 |
reqp->bounce_sgl, |
1894 |
reqp->not_aligned_seg_bits); |
1895 |
} |
1896 |
|
1897 |
storvsc_destroy_bounce_buffer(reqp->bounce_sgl); |
1898 |
reqp->bounce_sgl_count = 0; |
1899 |
} |
1900 |
|
1296 |
if (reqp->retries > 0) { |
1901 |
if (reqp->retries > 0) { |
1297 |
mtx_lock(&sc->hs_lock); |
1902 |
mtx_lock(&sc->hs_lock); |
1298 |
#if HVS_TIMEOUT_TEST |
1903 |
#if HVS_TIMEOUT_TEST |
Lines 1310-1316
Link Here
|
1310 |
mtx_unlock(&sc->hs_lock); |
1915 |
mtx_unlock(&sc->hs_lock); |
1311 |
} |
1916 |
} |
1312 |
|
1917 |
|
1313 |
/* |
1918 |
/* |
1314 |
* callout_drain() will wait for the timer handler to finish |
1919 |
* callout_drain() will wait for the timer handler to finish |
1315 |
* if it is running. So we don't need any lock to synchronize |
1920 |
* if it is running. So we don't need any lock to synchronize |
1316 |
* between this routine and the timer handler. |
1921 |
* between this routine and the timer handler. |