@@ -346,6 +346,52 @@
 	return error < 0 ? ENXIO : 0;
 }
 
+static int
+vtnet_refill_rxq(struct netmap_kring *kring, u_int nm_i, u_int head)
+{
+	struct netmap_adapter *na = kring->na;
+	struct ifnet *ifp = na->ifp;
+	struct netmap_ring *ring = kring->ring;
+	u_int ring_nr = kring->ring_id;
+	u_int const lim = kring->nkr_num_slots - 1;
+	u_int n;
+
+	/* device-specific */
+	struct vtnet_softc *sc = ifp->if_softc;
+	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
+	struct virtqueue *vq = rxq->vtnrx_vq;
+
+	/* use a local sglist, default might be short */
+	struct sglist_seg ss[2];
+	struct sglist sg = { ss, 0, 0, 2 };
+
+	for (n = 0; nm_i != head; n++) {
+		static struct virtio_net_hdr_mrg_rxbuf hdr;
+		struct netmap_slot *slot = &ring->slot[nm_i];
+		uint64_t paddr;
+		void *addr = PNMB(na, slot, &paddr);
+		int err = 0;
+
+		if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */
+			if (netmap_ring_reinit(kring))
+				return -1;
+		}
+
+		slot->flags &= ~NS_BUF_CHANGED;
+		sglist_reset(&sg); /* cheap */
+		err = sglist_append(&sg, &hdr, sc->vtnet_hdr_size);
+		err = sglist_append_phys(&sg, paddr, NETMAP_BUF_SIZE(na));
+		/* writable for the host */
+		err = virtqueue_enqueue(vq, rxq, &sg, 0, sg.sg_nseg);
+		if (err < 0) {
+			D("virtqueue_enqueue failed");
+			break;
+		}
+		nm_i = nm_next(nm_i, lim);
+	}
+	return nm_i;
+}
+
 /* Reconcile kernel and user view of the receive ring. */
 static int
 vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
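
The refill loop above walks the kring from nm_i up to, but not including, head, wrapping at lim = nkr_num_slots - 1 via nm_next(). Here is a minimal standalone model of that index walk; the ring size and start/stop indices are made-up values for illustration, not part of the patch:

/*
 * Userspace model of the refill loop's ring walk: the index
 * advances one slot at a time and wraps to 0 after lim, stopping
 * just short of `head`.  All values are hypothetical.
 */
#include <stdio.h>

static unsigned int
nm_next_model(unsigned int i, unsigned int lim)
{
        return (i == lim) ? 0 : i + 1;
}

int
main(void)
{
        const unsigned int num_slots = 8;       /* assume an 8-slot ring */
        const unsigned int lim = num_slots - 1;
        unsigned int nm_i = 6, head = 2;        /* walk wraps past slot 7 */
        unsigned int n;

        for (n = 0; nm_i != head; n++) {
                printf("would enqueue slot %u\n", nm_i);
                nm_i = nm_next_model(nm_i, lim);
        }
        printf("published %u buffers, stopped at slot %u\n", n, nm_i);
        return 0;
}

With these values the walk posts slots 6, 7, 0, 1 and stops at 2, which is exactly the wraparound behavior the real loop relies on when nm_i is near the end of the ring.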
@@ -510,6 +556,41 @@
 	info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);
 
 	return 0;
+}
+
+/* Make the RX virtqueue buffers point to netmap buffers. */
+static int
+vtnet_netmap_init_rx_buffers(struct vtnet_softc *sc)
+{
+	struct ifnet *ifp = sc->vtnet_ifp;
+	struct netmap_adapter* na = NA(ifp);
+	unsigned int r;
+
+	if (!nm_native_on(na))
+		return 0;
+	for (r = 0; r < na->num_rx_rings; r++) {
+		struct netmap_kring *kring = na->rx_rings[r];
+		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[r];
+		struct virtqueue *vq = rxq->vtnrx_vq;
+		struct netmap_slot* slot;
+		int err = 0;
+
+		slot = netmap_reset(na, NR_RX, r, 0);
+		if (!slot) {
+			D("strange, null netmap ring %d", r);
+			return 0;
+		}
+		/* Add up to na->num_rx_desc-1 buffers to this RX virtqueue.
+		 * It's important to leave one virtqueue slot free, otherwise
+		 * we can run into ring->cur/ring->tail wraparounds.
+		 */
+		err = vtnet_refill_rxq(kring, 0, na->num_rx_desc-1);
+		if (err < 0)
+			return 0;
+		virtqueue_notify(vq);
+	}
+
+	return 1;
 }
 
 static void
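
Neither hunk shows who calls vtnet_netmap_init_rx_buffers(). A plausible call site, sketched here purely as an assumption rather than as part of the patch, is early in the driver's RX-queue initialization: if netmap is active and has posted its own buffers, the normal mbuf refill path is skipped.

/*
 * Hypothetical call site (not in the hunks above): the DEV_NETMAP
 * guard and the enclosing RX-queue init function are assumptions.
 * A nonzero return means netmap now owns the RX buffers.
 */
#ifdef DEV_NETMAP
	if (vtnet_netmap_init_rx_buffers(sc))
		return 0;	/* netmap posted the buffers; skip mbuf refill */
#endif

This matches the return-value convention above: the function returns 1 only when every ring was handed to netmap, and 0 (leaving the caller to do its usual setup) when netmap is off or any ring could not be reset.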