Patch for bug 256317: usr.sbin/bhyve/pci_nvme.c

usr.sbin/bhyve/pci_nvme.c (-5 / +50 lines)
--- a/usr.sbin/bhyve/pci_nvme.c
+++ b/usr.sbin/bhyve/pci_nvme.c
@@ -339,6 +339,7 @@ static void pci_nvme_io_done(struct blockif_req *, int);
 	((sts) >> NVME_CSTS_REG_RDY_SHIFT & NVME_CSTS_REG_RDY_MASK)
 
 #define	NVME_CSTS_RDY	(1 << NVME_CSTS_REG_RDY_SHIFT)
+#define	NVME_CSTS_CFS	(1 << NVME_CSTS_REG_CFS_SHIFT)
 
 /* Completion Queue status word utils */
 #define	NVME_STATUS_P	(1 << NVME_STATUS_P_SHIFT)
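
The new NVME_CSTS_CFS mask is the Controller Fatal Status bit, bit 1 of the CSTS register in the NVMe spec, so it evaluates to 0x2 and cannot collide with RDY in bit 0. A minimal stand-alone check of that encoding (the shift values are restated below and assumed to match the NVMe register definitions the file already includes):

    #include <assert.h>
    #include <stdint.h>

    /* Restated for the example; assumed to match sys/dev/nvme/nvme.h:
     * CSTS bit 0 = RDY (ready), bit 1 = CFS (Controller Fatal Status). */
    #define NVME_CSTS_REG_RDY_SHIFT 0
    #define NVME_CSTS_REG_CFS_SHIFT 1

    #define NVME_CSTS_RDY (1 << NVME_CSTS_REG_RDY_SHIFT)
    #define NVME_CSTS_CFS (1 << NVME_CSTS_REG_CFS_SHIFT)

    int
    main(void)
    {
            uint32_t csts = 0;

            csts |= NVME_CSTS_CFS;                  /* what the patch sets on failure */
            assert(csts == 0x2);                    /* CFS is bit 1 */
            assert((csts & NVME_CSTS_RDY) == 0);    /* RDY stays clear */
            return (0);
    }
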
@@ -768,30 +769,61 @@ pci_nvme_reset(struct pci_nvme_softc *sc)
 	pthread_mutex_unlock(&sc->mtx);
 }
 
-static void
+static int
 pci_nvme_init_controller(struct vmctx *ctx, struct pci_nvme_softc *sc)
 {
 	uint16_t acqs, asqs;
 
 	DPRINTF("%s", __func__);
 
-	asqs = (sc->regs.aqa & NVME_AQA_REG_ASQS_MASK) + 1;
+	/*
+	 * NVMe 2.0 states that "enabling a controller while this field is
+	 * cleared to 0h produces undefined results" for both ACQS and
+	 * ASQS. If zero, set CFS and do not become ready.
+	 */
+	asqs = ONE_BASED(sc->regs.aqa & NVME_AQA_REG_ASQS_MASK);
+	if (asqs < 2) {
+		EPRINTLN("%s: illegal ASQS value %#x (aqa=%#x)", __func__,
+		    asqs - 1, sc->regs.aqa);
+		sc->regs.csts |= NVME_CSTS_CFS;
+		return (-1);
+	}
 	sc->submit_queues[0].size = asqs;
 	sc->submit_queues[0].qbase = vm_map_gpa(ctx, sc->regs.asq,
 	            sizeof(struct nvme_command) * asqs);
+	if (sc->submit_queues[0].qbase == NULL) {
+		EPRINTLN("%s: ASQ vm_map_gpa(%lx) failed", __func__,
+		    sc->regs.asq);
+		sc->regs.csts |= NVME_CSTS_CFS;
+		return (-1);
+	}
 
 	DPRINTF("%s mapping Admin-SQ guest 0x%lx, host: %p",
 	        __func__, sc->regs.asq, sc->submit_queues[0].qbase);
 
-	acqs = ((sc->regs.aqa >> NVME_AQA_REG_ACQS_SHIFT) &
-	    NVME_AQA_REG_ACQS_MASK) + 1;
+	acqs = ONE_BASED((sc->regs.aqa >> NVME_AQA_REG_ACQS_SHIFT) &
+	    NVME_AQA_REG_ACQS_MASK);
+	if (acqs < 2) {
+		EPRINTLN("%s: illegal ACQS value %#x (aqa=%#x)", __func__,
+		    acqs - 1, sc->regs.aqa);
+		sc->regs.csts |= NVME_CSTS_CFS;
+		return (-1);
+	}
 	sc->compl_queues[0].size = acqs;
 	sc->compl_queues[0].qbase = vm_map_gpa(ctx, sc->regs.acq,
 	         sizeof(struct nvme_completion) * acqs);
+	if (sc->compl_queues[0].qbase == NULL) {
+		EPRINTLN("%s: ACQ vm_map_gpa(%lx) failed", __func__,
+		    sc->regs.acq);
+		sc->regs.csts |= NVME_CSTS_CFS;
+		return (-1);
+	}
 	sc->compl_queues[0].intr_en = NVME_CQ_INTEN;
 
 	DPRINTF("%s mapping Admin-CQ guest 0x%lx, host: %p",
 	        __func__, sc->regs.acq, sc->compl_queues[0].qbase);
+
+	return (0);
 }
 
 static int
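
The comment in the hunk carries the rationale: AQA stores the admin queue sizes as 0's-based values, so ONE_BASED(), which replaces the open-coded + 1 of the old lines, turns a cleared field into 1, and the new < 2 test rejects it before either queue is mapped. A small stand-alone illustration of that arithmetic (the macro and mask values are restated below and assumed to match nvme.h and pci_nvme.c):

    #include <stdint.h>
    #include <stdio.h>

    /* Restated for the example; assumed to match nvme.h and pci_nvme.c. */
    #define ONE_BASED(x)            ((x) + 1)
    #define NVME_AQA_REG_ASQS_MASK  0xfff
    #define NVME_AQA_REG_ACQS_SHIFT 16
    #define NVME_AQA_REG_ACQS_MASK  0xfff

    static void
    check_aqa(uint32_t aqa)
    {
            uint16_t asqs = ONE_BASED(aqa & NVME_AQA_REG_ASQS_MASK);
            uint16_t acqs = ONE_BASED((aqa >> NVME_AQA_REG_ACQS_SHIFT) &
                NVME_AQA_REG_ACQS_MASK);

            printf("aqa=%#x: asqs=%u acqs=%u -> %s\n", aqa, asqs, acqs,
                (asqs < 2 || acqs < 2) ? "reject, set CSTS.CFS" : "accept");
    }

    int
    main(void)
    {
            check_aqa(0x00000000);  /* cleared AQA: undefined per NVMe 2.0, now rejected */
            check_aqa(0x001f001f);  /* typical 32-entry admin queues: accepted */
            return (0);
    }
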
@@ -2415,6 +2447,12 @@ pci_nvme_write_bar_0(struct vmctx *ctx, struct pci_nvme_softc* sc,
 		uint64_t idx = belloffset / 8; /* door bell size = 2*int */
 		int is_sq = (belloffset % 8) < 4;
 
+		if ((sc->regs.csts & NVME_CSTS_RDY) == 0) {
+			WPRINTF("doorbell write prior to RDY (offset=%#lx)\n",
+			    offset);
+			return;
+		}
+
 		if (belloffset > ((sc->max_queues+1) * 8 - 4)) {
 			WPRINTF("guest attempted an overflow write offset "
 			         "0x%lx, val 0x%lx in %s",
@@ -2422,6 +2460,12 @@ pci_nvme_write_bar_0(struct vmctx *ctx, struct pci_nvme_softc* sc,
 			return;
 		}
 
+		if (is_sq) {
+			if (sc->submit_queues[idx].qbase == NULL)
+				return;
+		} else if (sc->compl_queues[idx].qbase == NULL)
+			return;
+
 		pci_nvme_handle_doorbell(ctx, sc, idx, is_sq, value);
 		return;
 	}
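
For readers decoding the doorbell math in the context lines above: with a doorbell stride of 0, every queue pair owns 8 bytes starting at BAR0 offset 0x1000, submission queue tail first and completion queue head second, which is where belloffset / 8 and (belloffset % 8) < 4 come from. A stand-alone sketch of that decoding (the 0x1000 base is taken from the NVMe spec; only the two expressions mirror the code above):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NVME_DOORBELL_BASE 0x1000   /* first doorbell register in BAR0 */

    int
    main(void)
    {
            /* Example guest writes: admin CQ head doorbell, then I/O SQ 1 tail. */
            uint64_t offsets[] = { 0x1004, 0x1008 };

            for (int i = 0; i < 2; i++) {
                    uint64_t belloffset = offsets[i] - NVME_DOORBELL_BASE;
                    uint64_t idx = belloffset / 8;          /* queue id: 8 bytes per SQ/CQ pair */
                    int is_sq = (belloffset % 8) < 4;       /* first 4 bytes are the SQ tail */

                    printf("offset 0x%" PRIx64 " -> queue %" PRIu64 ", %s doorbell\n",
                        offsets[i], idx, is_sq ? "SQ tail" : "CQ head");
            }
            return (0);
    }
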
@@ -2488,7 +2532,8 @@ pci_nvme_write_bar_0(struct vmctx *ctx, struct pci_nvme_softc* sc,
 			sc->regs.cc &= ~NVME_CC_NEN_WRITE_MASK;
 			sc->regs.cc |= ccreg & NVME_CC_NEN_WRITE_MASK;
 			sc->regs.csts &= ~NVME_CSTS_RDY;
-		} else if (sc->pending_ios == 0) {
+		} else if ((sc->pending_ios == 0) &&
+		    !(sc->regs.csts & NVME_CSTS_CFS)) {
 			sc->regs.csts |= NVME_CSTS_RDY;
 		}
 		break;
