--- sys/dev/virtio/network/if_vtnetvar.h.ori	2020-01-08 19:57:18.670256000 -0800
+++ sys/dev/virtio/network/if_vtnetvar.h	2020-01-08 19:58:47.614309220 -0800
@@ -82,6 +82,9 @@
 	struct taskqueue	*vtnrx_tq;
 	struct task		 vtnrx_intrtask;
 	struct lro_ctrl		 vtnrx_lro;
+#ifdef DEV_NETMAP
+	struct virtio_net_hdr_mrg_rxbuf	 vtnrx_shrhdr;
+#endif /* DEV_NETMAP */
 
 	char			 vtnrx_name[16];
 } __aligned(CACHE_LINE_SIZE);
@@ -118,6 +121,9 @@
 #ifndef VTNET_LEGACY_TX
 	struct task		 vtntx_defrtask;
 #endif
+#ifdef DEV_NETMAP
+	struct virtio_net_hdr_mrg_rxbuf	 vtntx_shrhdr;
+#endif /* DEV_NETMAP */
 
 	char			 vtntx_name[16];
 } __aligned(CACHE_LINE_SIZE);
--- sys/dev/netmap/if_vtnet_netmap.h.ori	2020-01-08 20:41:23.318408000 -0800
+++ sys/dev/netmap/if_vtnet_netmap.h	2020-01-08 21:04:04.407345486 -0800
@@ -346,6 +346,53 @@
 	return error < 0 ? ENXIO : 0;
 }
 
+/* Fill the RX virtqueue with netmap buffers up to (but excluding) head. */
+static int
+vtnet_refill_rxq(struct netmap_kring *kring, u_int nm_i, u_int head)
+{
+	struct netmap_adapter *na = kring->na;
+	struct ifnet *ifp = na->ifp;
+	struct netmap_ring *ring = kring->ring;
+	u_int ring_nr = kring->ring_id;
+	u_int const lim = kring->nkr_num_slots - 1;
+	u_int n;
+
+	/* device-specific */
+	struct vtnet_softc *sc = ifp->if_softc;
+	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
+	struct virtqueue *vq = rxq->vtnrx_vq;
+
+	/* Use a local sglist, the default one might be too short. */
+	struct sglist_seg ss[2];
+	struct sglist sg = { ss, 0, 0, 2 };
+
+	for (n = 0; nm_i != head; n++) {
+		struct netmap_slot *slot = &ring->slot[nm_i];
+		uint64_t paddr;
+		void *addr = PNMB(na, slot, &paddr);
+		int err;
+
+		if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */
+			if (netmap_ring_reinit(kring))
+				return -1;
+		}
+
+		slot->flags &= ~NS_BUF_CHANGED;
+		sglist_reset(&sg); /* cheap */
+		err = sglist_append(&sg, &rxq->vtnrx_shrhdr, sc->vtnet_hdr_size);
+		err |= sglist_append_phys(&sg, paddr, NETMAP_BUF_SIZE(na));
+		KASSERT(err == 0, ("%s: sglist_append failed", __func__));
+		/* The whole buffer is writable for the host. */
+		err = virtqueue_enqueue(vq, rxq, &sg, 0, sg.sg_nseg);
+		if (err < 0) {
+			D("virtqueue_enqueue failed: %d", err);
+			break;
+		}
+		nm_i = nm_next(nm_i, lim);
+	}
+	return nm_i;
+}
+
 /* Reconcile kernel and user view of the receive ring. */
 static int
 vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
@@ -510,6 +557,41 @@
 	info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);
 
 	return 0;
+}
+
+/* Populate the RX virtqueues with netmap buffers. */
+static int
+vtnet_netmap_init_rx_buffers(struct vtnet_softc *sc)
+{
+	struct ifnet *ifp = sc->vtnet_ifp;
+	struct netmap_adapter *na = NA(ifp);
+	unsigned int r;
+
+	if (!nm_native_on(na))
+		return 0;
+	for (r = 0; r < na->num_rx_rings; r++) {
+		struct netmap_kring *kring = na->rx_rings[r];
+		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[r];
+		struct virtqueue *vq = rxq->vtnrx_vq;
+		struct netmap_slot *slot;
+		int err;
+
+		slot = netmap_reset(na, NR_RX, r, 0);
+		if (slot == NULL) {
+			D("strange, null netmap ring %d", r);
+			return 0;
+		}
+		/* Add up to na->num_rx_desc-1 buffers to this RX virtqueue.
+		 * It is important to leave one virtqueue slot free, otherwise
+		 * we can run into ring->cur/ring->tail wraparounds.
+		 */
+		err = vtnet_refill_rxq(kring, 0, na->num_rx_desc - 1);
+		if (err < 0)
+			return 0;
+		virtqueue_notify(vq);
+	}
+
+	return 1;
 }
 
 static void
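
For the refill to actually happen, vtnet_netmap_init_rx_buffers() also needs a
caller in the driver's RX initialization path. A minimal sketch of that hook,
assuming it sits near the top of vtnet_init_rx_queues() in
sys/dev/virtio/network/if_vtnet.c (the exact placement shown here is
illustrative, not part of the patch above):

	#ifdef DEV_NETMAP
		/* In netmap native mode, vtnet_netmap_init_rx_buffers() fills
		 * the RX virtqueues with netmap buffers; when it returns
		 * nonzero, skip the normal mbuf-based refill. */
		if (vtnet_netmap_init_rx_buffers(sc))
			return (0);
	#endif /* DEV_NETMAP */

Since vtnet_netmap_init_rx_buffers() returns 1 on success and 0 when netmap is
not active, falling through leaves the non-netmap path untouched.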