Lines 79-86
map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
Link Here
|
/*
 * Forward declarations for the ThunderBolt support routines below.
 * mfi_sync_map_complete/mfi_queue_map_sync implement the persistent
 * LD map-sync command used to track RAID state changes.
 */
static void mfi_issue_pending_cmds_again(struct mfi_softc *sc);
static void mfi_kill_hba(struct mfi_softc *sc);
static void mfi_process_fw_state_chg_isr(void *arg);
static void mfi_sync_map_complete(struct mfi_command *);
static void mfi_queue_map_sync(struct mfi_softc *sc);

#define MFI_FUSION_ENABLE_INTERRUPT_MASK	(0x00000008)
Lines 1310-1316
mfi_process_fw_state_chg_isr(void *arg)
Link Here
|
1310 |
* Sync Map |
1308 |
* Sync Map |
1311 |
*/ |
1309 |
*/ |
1312 |
mfi_aen_setup(sc, sc->last_seq_num); |
1310 |
mfi_aen_setup(sc, sc->last_seq_num); |
1313 |
mfi_tbolt_sync_map_info(sc); |
|
|
1314 |
|
1311 |
|
1315 |
sc->issuepend_done = 1; |
1312 |
sc->issuepend_done = 1; |
1316 |
device_printf(sc->mfi_dev, "second stage of reset " |
1313 |
device_printf(sc->mfi_dev, "second stage of reset " |
Lines 1325-1500
mfi_process_fw_state_chg_isr(void *arg)
Link Here
|
1325 |
} |
1322 |
} |
1326 |
} |
1323 |
} |
1327 |
|
1324 |
|
1328 |
/* |
|
|
1329 |
* The ThunderBolt HW has an option for the driver to directly |
1330 |
* access the underlying disks and operate on the RAID. To |
1331 |
* do this there needs to be a capability to keep the RAID controller |
1332 |
* and driver in sync. The FreeBSD driver does not take advantage |
1333 |
* of this feature since it adds a lot of complexity and slows down |
1334 |
* performance. Performance is gained by using the controller's |
1335 |
* cache etc. |
1336 |
* |
1337 |
* Even though this driver doesn't access the disks directly, an |
1338 |
* AEN like command is used to inform the RAID firmware to "sync" |
1339 |
* with all LD's via the MFI_DCMD_LD_MAP_GET_INFO command. This |
1340 |
* command in write mode will return when the RAID firmware has |
1341 |
* detected a change to the RAID state. Examples of this type |
1342 |
* of change are removing a disk. Once the command returns then |
1343 |
* the driver needs to acknowledge this and "sync" all LD's again. |
1344 |
* This repeats until we shutdown. Then we need to cancel this |
1345 |
* pending command. |
1346 |
* |
1347 |
* If this is not done right the RAID firmware will not remove a |
1348 |
* pulled drive and the RAID won't go degraded etc. Effectively, |
1349 |
* stopping any RAID mangement to functions. |
1350 |
* |
1351 |
* Doing another LD sync, requires the use of an event since the |
1352 |
* driver needs to do a mfi_wait_command and can't do that in an |
1353 |
* interrupt thread. |
1354 |
* |
1355 |
* The driver could get the RAID state via the MFI_DCMD_LD_MAP_GET_INFO |
1356 |
* That requires a bunch of structure and it is simpler to just do |
1357 |
* the MFI_DCMD_LD_GET_LIST versus walking the RAID map. |
1358 |
*/ |
1359 |
|
1360 |
void |
1361 |
mfi_tbolt_sync_map_info(struct mfi_softc *sc) |
1362 |
{ |
1363 |
int error = 0, i; |
1364 |
struct mfi_command *cmd = NULL; |
1365 |
struct mfi_dcmd_frame *dcmd = NULL; |
1366 |
uint32_t context = 0; |
1367 |
union mfi_ld_ref *ld_sync = NULL; |
1368 |
size_t ld_size; |
1369 |
struct mfi_frame_header *hdr; |
1370 |
struct mfi_command *cm = NULL; |
1371 |
struct mfi_ld_list *list = NULL; |
1372 |
|
1373 |
mtx_assert(&sc->mfi_io_lock, MA_OWNED); |
1374 |
|
1375 |
if (sc->mfi_map_sync_cm != NULL || sc->cm_map_abort) |
1376 |
return; |
1377 |
|
1378 |
error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST, |
1379 |
(void **)&list, sizeof(*list)); |
1380 |
if (error) |
1381 |
goto out; |
1382 |
|
1383 |
cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAIN; |
1384 |
|
1385 |
if (mfi_wait_command(sc, cm) != 0) { |
1386 |
device_printf(sc->mfi_dev, "Failed to get device listing\n"); |
1387 |
goto out; |
1388 |
} |
1389 |
|
1390 |
hdr = &cm->cm_frame->header; |
1391 |
if (hdr->cmd_status != MFI_STAT_OK) { |
1392 |
device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n", |
1393 |
hdr->cmd_status); |
1394 |
goto out; |
1395 |
} |
1396 |
|
1397 |
ld_size = sizeof(*ld_sync) * list->ld_count; |
1398 |
ld_sync = (union mfi_ld_ref *) malloc(ld_size, M_MFIBUF, |
1399 |
M_NOWAIT | M_ZERO); |
1400 |
if (ld_sync == NULL) { |
1401 |
device_printf(sc->mfi_dev, "Failed to allocate sync\n"); |
1402 |
goto out; |
1403 |
} |
1404 |
for (i = 0; i < list->ld_count; i++) |
1405 |
ld_sync[i].ref = list->ld_list[i].ld.ref; |
1406 |
|
1407 |
if ((cmd = mfi_dequeue_free(sc)) == NULL) { |
1408 |
device_printf(sc->mfi_dev, "Failed to get command\n"); |
1409 |
free(ld_sync, M_MFIBUF); |
1410 |
goto out; |
1411 |
} |
1412 |
|
1413 |
context = cmd->cm_frame->header.context; |
1414 |
bzero(cmd->cm_frame, sizeof(union mfi_frame)); |
1415 |
cmd->cm_frame->header.context = context; |
1416 |
|
1417 |
dcmd = &cmd->cm_frame->dcmd; |
1418 |
bzero(dcmd->mbox, MFI_MBOX_SIZE); |
1419 |
dcmd->header.cmd = MFI_CMD_DCMD; |
1420 |
dcmd->header.flags = MFI_FRAME_DIR_WRITE; |
1421 |
dcmd->header.timeout = 0; |
1422 |
dcmd->header.data_len = ld_size; |
1423 |
dcmd->header.scsi_status = 0; |
1424 |
dcmd->opcode = MFI_DCMD_LD_MAP_GET_INFO; |
1425 |
cmd->cm_sg = &dcmd->sgl; |
1426 |
cmd->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; |
1427 |
cmd->cm_data = ld_sync; |
1428 |
cmd->cm_private = ld_sync; |
1429 |
|
1430 |
cmd->cm_len = ld_size; |
1431 |
cmd->cm_complete = mfi_sync_map_complete; |
1432 |
sc->mfi_map_sync_cm = cmd; |
1433 |
|
1434 |
cmd->cm_flags = MFI_CMD_DATAOUT; |
1435 |
cmd->cm_frame->dcmd.mbox[0] = list->ld_count; |
1436 |
cmd->cm_frame->dcmd.mbox[1] = MFI_DCMD_MBOX_PEND_FLAG; |
1437 |
|
1438 |
if ((error = mfi_mapcmd(sc, cmd)) != 0) { |
1439 |
device_printf(sc->mfi_dev, "failed to send map sync\n"); |
1440 |
free(ld_sync, M_MFIBUF); |
1441 |
sc->mfi_map_sync_cm = NULL; |
1442 |
mfi_release_command(cmd); |
1443 |
goto out; |
1444 |
} |
1445 |
|
1446 |
out: |
1447 |
if (list) |
1448 |
free(list, M_MFIBUF); |
1449 |
if (cm) |
1450 |
mfi_release_command(cm); |
1451 |
} |
1452 |
|
1453 |
static void |
1454 |
mfi_sync_map_complete(struct mfi_command *cm) |
1455 |
{ |
1456 |
struct mfi_frame_header *hdr; |
1457 |
struct mfi_softc *sc; |
1458 |
int aborted = 0; |
1459 |
|
1460 |
sc = cm->cm_sc; |
1461 |
mtx_assert(&sc->mfi_io_lock, MA_OWNED); |
1462 |
|
1463 |
hdr = &cm->cm_frame->header; |
1464 |
|
1465 |
if (sc->mfi_map_sync_cm == NULL) |
1466 |
return; |
1467 |
|
1468 |
if (sc->cm_map_abort || |
1469 |
hdr->cmd_status == MFI_STAT_INVALID_STATUS) { |
1470 |
sc->cm_map_abort = 0; |
1471 |
aborted = 1; |
1472 |
} |
1473 |
|
1474 |
free(cm->cm_data, M_MFIBUF); |
1475 |
wakeup(&sc->mfi_map_sync_cm); |
1476 |
sc->mfi_map_sync_cm = NULL; |
1477 |
mfi_release_command(cm); |
1478 |
|
1479 |
/* set it up again so the driver can catch more events */ |
1480 |
if (!aborted) |
1481 |
mfi_queue_map_sync(sc); |
1482 |
} |
1483 |
|
1484 |
static void |
1485 |
mfi_queue_map_sync(struct mfi_softc *sc) |
1486 |
{ |
1487 |
mtx_assert(&sc->mfi_io_lock, MA_OWNED); |
1488 |
taskqueue_enqueue(taskqueue_swi, &sc->mfi_map_sync_task); |
1489 |
} |
1490 |
|
1491 |
void |
1492 |
mfi_handle_map_sync(void *context, int pending) |
1493 |
{ |
1494 |
struct mfi_softc *sc; |
1495 |
|
1496 |
sc = context; |
1497 |
mtx_lock(&sc->mfi_io_lock); |
1498 |
mfi_tbolt_sync_map_info(sc); |
1499 |
mtx_unlock(&sc->mfi_io_lock); |
1500 |
} |